You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ja...@apache.org on 2017/10/09 18:34:15 UTC
[01/50] [abbrv] ambari git commit: AMBARI-21695. Problem in starting
accumulo in upgraded cluster (IOP-HDP). UT fix. (swagle)
Repository: ambari
Updated Branches:
refs/heads/2.5-maint [created] fe71b2224
AMBARI-21695. Problem in starting accumulo in upgraded cluster (IOP-HDP). UT fix. (swagle)
(cherry picked from commit b02146cd5b3172de9081cbb43b46e54c715d0876)
Change-Id: I6f97a36fd0266c5a11146f002f7bb82b262d4baf
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/816ebb58
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/816ebb58
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/816ebb58
Branch: refs/heads/2.5-maint
Commit: 816ebb5880857bf3f8376d8d1d4e4bffcdb72cca
Parents: b8cb590
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Aug 10 09:55:19 2017 -0700
Committer: swagle@hortonworks.com <je...@hortonworks.com>
Committed: Thu Aug 10 17:16:37 2017 +0000
----------------------------------------------------------------------
.../listeners/upgrade/StackUpgradeFinishListener.java | 6 +++---
.../upgrade/StackUpgradeFinishListenerTest.java | 14 ++++++--------
2 files changed, 9 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/816ebb58/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListener.java
index 41a0f35..7dd9754 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListener.java
@@ -53,7 +53,7 @@ public class StackUpgradeFinishListener {
Provider<AmbariMetaInfo> ambariMetaInfo;
@Inject
- RoleCommandOrderProvider roleCommandOrderProvider;
+ Provider<RoleCommandOrderProvider> roleCommandOrderProvider;
/**
* Constructor.
@@ -88,9 +88,9 @@ public class StackUpgradeFinishListener {
}
// Clear the RoleCommandOrder cache on upgrade
- if (roleCommandOrderProvider instanceof CachedRoleCommandOrderProvider) {
+ if (roleCommandOrderProvider.get() instanceof CachedRoleCommandOrderProvider) {
LOG.info("Clearing RCO cache");
- CachedRoleCommandOrderProvider cachedRcoProvider = (CachedRoleCommandOrderProvider) roleCommandOrderProvider;
+ CachedRoleCommandOrderProvider cachedRcoProvider = (CachedRoleCommandOrderProvider) roleCommandOrderProvider.get();
cachedRcoProvider.clearRoleCommandOrderCache();
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/816ebb58/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListenerTest.java
index 94e5890..360f3f0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackUpgradeFinishListenerTest.java
@@ -17,35 +17,31 @@
*/
package org.apache.ambari.server.events.listeners.upgrade;
-import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.events.StackUpgradeFinishEvent;
import org.apache.ambari.server.events.publishers.VersionEventPublisher;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.ComponentInfo;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.UpgradeState;
import org.easymock.EasyMockRunner;
import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.easymock.MockType;
import org.easymock.TestSubject;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
+import com.google.inject.Provider;
/**
@@ -75,6 +71,8 @@ public class StackUpgradeFinishListenerTest extends EasyMockSupport {
@TestSubject
private StackUpgradeFinishListener listener = new StackUpgradeFinishListener(publisher);
+ @Mock(type = MockType.NICE)
+ private Provider<RoleCommandOrderProvider> roleCommandOrderProviderProviderMock;
@Before
public void setup() throws Exception {
[35/50] [abbrv] ambari git commit: Update ambari.version to 2.5.2.0.0
in pom.xml files. (avijayan)
Posted by ja...@apache.org.
Update ambari.version to 2.5.2.0.0 in pom.xml files. (avijayan)
(cherry picked from commit 6e5fbe519f6b94ed7626d13491b9fe889492f034)
Change-Id: I7e46b17499f03beb62df2d8d78e604bc6bc9bc9d
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bf66bf8e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bf66bf8e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bf66bf8e
Branch: refs/heads/2.5-maint
Commit: bf66bf8e533551ecd3f8cd445029016477a5f1e5
Parents: a0b3f4f
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Aug 22 10:21:34 2017 -0700
Committer: avijayan@hortonworks.com <je...@hortonworks.com>
Committed: Tue Aug 22 17:56:14 2017 +0000
----------------------------------------------------------------------
ambari-admin/pom.xml | 4 ++--
ambari-agent/pom.xml | 4 ++--
ambari-client/groovy-client/pom.xml | 4 ++--
ambari-client/pom.xml | 4 ++--
ambari-client/python-client/pom.xml | 4 ++--
ambari-funtest/pom.xml | 4 ++--
ambari-logsearch/ambari-infra-solr-plugin/pom.xml | 2 +-
ambari-logsearch/ambari-logsearch-appender/pom.xml | 4 ++--
ambari-logsearch/ambari-logsearch-assembly/pom.xml | 2 +-
ambari-logsearch/ambari-logsearch-it/pom.xml | 2 +-
ambari-logsearch/ambari-logsearch-logfeeder/pom.xml | 4 ++--
ambari-logsearch/ambari-logsearch-portal/pom.xml | 4 ++--
ambari-logsearch/ambari-logsearch-solr-client/pom.xml | 2 +-
ambari-logsearch/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-assembly/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-common/pom.xml | 2 +-
ambari-metrics/ambari-metrics-flume-sink/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-grafana/pom.xml | 2 +-
ambari-metrics/ambari-metrics-hadoop-sink/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-host-monitoring/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-kafka-sink/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-storm-sink-legacy/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-storm-sink/pom.xml | 4 ++--
ambari-metrics/ambari-metrics-timelineservice/pom.xml | 6 +++---
ambari-metrics/pom.xml | 2 +-
ambari-project/pom.xml | 4 ++--
ambari-server/pom.xml | 4 ++--
ambari-shell/ambari-groovy-shell/pom.xml | 6 +++---
ambari-shell/ambari-python-shell/pom.xml | 4 ++--
ambari-shell/pom.xml | 4 ++--
ambari-views/examples/calculator-view/pom.xml | 4 ++--
ambari-views/examples/cluster-view/pom.xml | 6 +++---
ambari-views/examples/favorite-view/pom.xml | 4 ++--
ambari-views/examples/hello-servlet-view/pom.xml | 6 +++---
ambari-views/examples/hello-spring-view/pom.xml | 6 +++---
ambari-views/examples/helloworld-view/pom.xml | 6 +++---
ambari-views/examples/phone-list-upgrade-view/pom.xml | 6 +++---
ambari-views/examples/phone-list-view/pom.xml | 4 ++--
ambari-views/examples/pom.xml | 4 ++--
ambari-views/examples/property-validator-view/pom.xml | 6 +++---
ambari-views/examples/property-view/pom.xml | 6 +++---
ambari-views/examples/restricted-view/pom.xml | 4 ++--
ambari-views/examples/simple-view/pom.xml | 6 +++---
ambari-views/examples/weather-view/pom.xml | 4 ++--
ambari-views/pom.xml | 4 ++--
ambari-web/pom.xml | 4 ++--
contrib/ambari-log4j/pom.xml | 2 +-
contrib/ambari-scom/ambari-scom-server/pom.xml | 4 ++--
contrib/ambari-scom/metrics-sink/pom.xml | 2 +-
contrib/ambari-scom/pom.xml | 2 +-
contrib/management-packs/microsoft-r_mpack/pom.xml | 2 +-
contrib/management-packs/pom.xml | 2 +-
contrib/views/ambari-views-package/pom.xml | 4 ++--
contrib/views/capacity-scheduler/pom.xml | 4 ++--
contrib/views/commons/pom.xml | 6 +++---
contrib/views/files/pom.xml | 6 +++---
contrib/views/hawq/pom.xml | 4 ++--
contrib/views/hive-next/pom.xml | 8 ++++----
contrib/views/hive20/pom.xml | 8 ++++----
contrib/views/hueambarimigration/pom.xml | 4 ++--
contrib/views/jobs/pom.xml | 4 ++--
contrib/views/pig/pom.xml | 8 ++++----
contrib/views/pom.xml | 8 ++++----
contrib/views/slider/pom.xml | 6 +++---
contrib/views/storm/pom.xml | 2 +-
contrib/views/tez/pom.xml | 4 ++--
contrib/views/utils/pom.xml | 4 ++--
contrib/views/wfmanager/pom.xml | 6 +++---
docs/pom.xml | 4 ++--
pom.xml | 2 +-
utility/pom.xml | 2 +-
71 files changed, 149 insertions(+), 149 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-admin/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-admin/pom.xml b/ambari-admin/pom.xml
index bab1bef..8fc94b8 100644
--- a/ambari-admin/pom.xml
+++ b/ambari-admin/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
@@ -27,7 +27,7 @@
<artifactId>ambari-admin</artifactId>
<packaging>jar</packaging>
<name>Ambari Admin View</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Admin control panel</description>
<build>
<plugins>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index fb254fc..fdef897 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -19,13 +19,13 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-agent</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Agent</name>
<description>Ambari Agent</description>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-client/groovy-client/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-client/groovy-client/pom.xml b/ambari-client/groovy-client/pom.xml
index 828de5a..874becd 100644
--- a/ambari-client/groovy-client/pom.xml
+++ b/ambari-client/groovy-client/pom.xml
@@ -12,14 +12,14 @@
<parent>
<artifactId>ambari-client</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../../ambari-client/</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>groovy-client</artifactId>
<packaging>jar</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Groovy Client</name>
<description>Ambari Groovy client</description>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-client/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-client/pom.xml b/ambari-client/pom.xml
index 74a133d..5f5e457 100755
--- a/ambari-client/pom.xml
+++ b/ambari-client/pom.xml
@@ -20,14 +20,14 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-client</artifactId>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Client</name>
<description>Ambari client</description>
<modules>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-client/python-client/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-client/python-client/pom.xml b/ambari-client/python-client/pom.xml
index cddb191..990d8ad 100644
--- a/ambari-client/python-client/pom.xml
+++ b/ambari-client/python-client/pom.xml
@@ -19,14 +19,14 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-client</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../../ambari-client</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>python-client</artifactId>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Python Client</name>
<description>Ambari Python client</description>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-funtest/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-funtest/pom.xml b/ambari-funtest/pom.xml
index f7d63d8..df0f934 100644
--- a/ambari-funtest/pom.xml
+++ b/ambari-funtest/pom.xml
@@ -13,12 +13,12 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-funtest</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>${packagingFormat}</packaging>
<name>Ambari Functional Tests</name>
<description>Ambari Functional Tests</description>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/ambari-infra-solr-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-infra-solr-plugin/pom.xml b/ambari-logsearch/ambari-infra-solr-plugin/pom.xml
index d475a94..87f84ca 100644
--- a/ambari-logsearch/ambari-infra-solr-plugin/pom.xml
+++ b/ambari-logsearch/ambari-infra-solr-plugin/pom.xml
@@ -21,7 +21,7 @@
<parent>
<artifactId>ambari-logsearch</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<name>Ambari Infra Solr Plugin</name>
<url>http://maven.apache.org</url>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/ambari-logsearch-appender/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-appender/pom.xml b/ambari-logsearch/ambari-logsearch-appender/pom.xml
index 51e7984..9dc55b1 100644
--- a/ambari-logsearch/ambari-logsearch-appender/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-appender/pom.xml
@@ -21,11 +21,11 @@
<parent>
<artifactId>ambari-logsearch</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-logsearch-appender</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>jar</packaging>
<name>Ambari Logsearch Appender</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/ambari-logsearch-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-assembly/pom.xml b/ambari-logsearch/ambari-logsearch-assembly/pom.xml
index 8878997..ece878f 100644
--- a/ambari-logsearch/ambari-logsearch-assembly/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-assembly/pom.xml
@@ -21,7 +21,7 @@
<parent>
<artifactId>ambari-logsearch</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<name>Ambari Logsearch Assembly</name>
<url>http://maven.apache.org</url>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/ambari-logsearch-it/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-it/pom.xml b/ambari-logsearch/ambari-logsearch-it/pom.xml
index e426ab9..c8f754b 100644
--- a/ambari-logsearch/ambari-logsearch-it/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-it/pom.xml
@@ -21,7 +21,7 @@
<parent>
<artifactId>ambari-logsearch</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
index 49122e8..48eddba 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
@@ -21,13 +21,13 @@
<parent>
<artifactId>ambari-logsearch</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-logsearch-logfeeder</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>jar</packaging>
<name>Ambari Logsearch Log Feeder</name>
<url>http://maven.apache.org</url>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/ambari-logsearch-portal/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-portal/pom.xml b/ambari-logsearch/ambari-logsearch-portal/pom.xml
index a439266..dbbe4f5 100755
--- a/ambari-logsearch/ambari-logsearch-portal/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-portal/pom.xml
@@ -20,13 +20,13 @@
<parent>
<artifactId>ambari-logsearch</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-logsearch-portal</artifactId>
<packaging>jar</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<url>http://maven.apache.org</url>
<name>Ambari Logsearch Portal</name>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/ambari-logsearch-solr-client/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-solr-client/pom.xml b/ambari-logsearch/ambari-logsearch-solr-client/pom.xml
index a3d5823..804fda3 100644
--- a/ambari-logsearch/ambari-logsearch-solr-client/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-solr-client/pom.xml
@@ -21,7 +21,7 @@
<parent>
<artifactId>ambari-logsearch</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<url>http://maven.apache.org</url>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-logsearch/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index 8a85f82..808283c 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -18,13 +18,13 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<groupId>org.apache.ambari</groupId>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-logsearch</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>pom</packaging>
<url>http://maven.apache.org</url>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/pom.xml b/ambari-metrics/ambari-metrics-assembly/pom.xml
index b6b9ad7..42f61b5 100644
--- a/ambari-metrics/ambari-metrics-assembly/pom.xml
+++ b/ambari-metrics/ambari-metrics-assembly/pom.xml
@@ -23,13 +23,13 @@
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-assembly</artifactId>
<name>Ambari Metrics Assembly</name>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Ambari Metrics Assembly</description>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-common/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/pom.xml b/ambari-metrics/ambari-metrics-common/pom.xml
index b52b1d9..f819684 100644
--- a/ambari-metrics/ambari-metrics-common/pom.xml
+++ b/ambari-metrics/ambari-metrics-common/pom.xml
@@ -21,7 +21,7 @@
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-common</artifactId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-flume-sink/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/pom.xml b/ambari-metrics/ambari-metrics-flume-sink/pom.xml
index 78da829..76b934a 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/pom.xml
+++ b/ambari-metrics/ambari-metrics-flume-sink/pom.xml
@@ -22,11 +22,11 @@ limitations under the License.
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-flume-sink</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Metrics Flume Sink</name>
<packaging>jar</packaging>
<build>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-grafana/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-grafana/pom.xml b/ambari-metrics/ambari-metrics-grafana/pom.xml
index 9f9e86c..4064b7c 100644
--- a/ambari-metrics/ambari-metrics-grafana/pom.xml
+++ b/ambari-metrics/ambari-metrics-grafana/pom.xml
@@ -21,7 +21,7 @@
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml b/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
index 0f76e95..7fa441f 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/pom.xml
@@ -22,11 +22,11 @@ limitations under the License.
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-hadoop-sink</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Metrics Hadoop Sink</name>
<packaging>jar</packaging>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-host-monitoring/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/pom.xml b/ambari-metrics/ambari-metrics-host-monitoring/pom.xml
index 3da4b8a..b0c6157 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/pom.xml
+++ b/ambari-metrics/ambari-metrics-host-monitoring/pom.xml
@@ -22,11 +22,11 @@
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<artifactId>ambari-metrics-host-monitoring</artifactId>
<name>Ambari Metrics Monitor</name>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-kafka-sink/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/pom.xml b/ambari-metrics/ambari-metrics-kafka-sink/pom.xml
index c4bf2e7..fd33f09 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/pom.xml
+++ b/ambari-metrics/ambari-metrics-kafka-sink/pom.xml
@@ -22,11 +22,11 @@ limitations under the License.
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-kafka-sink</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Metrics Kafka Sink</name>
<packaging>jar</packaging>
<build>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-storm-sink-legacy/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/pom.xml b/ambari-metrics/ambari-metrics-storm-sink-legacy/pom.xml
index 97c7e21..fa55be8 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/pom.xml
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/pom.xml
@@ -22,11 +22,11 @@ limitations under the License.
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-storm-sink-legacy</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Metrics Storm Sink (Legacy)</name>
<packaging>jar</packaging>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-storm-sink/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/pom.xml b/ambari-metrics/ambari-metrics-storm-sink/pom.xml
index 6c3dc77..779ee1c 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/pom.xml
+++ b/ambari-metrics/ambari-metrics-storm-sink/pom.xml
@@ -22,11 +22,11 @@ limitations under the License.
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-storm-sink</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Metrics Storm Sink</name>
<packaging>jar</packaging>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/ambari-metrics-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/pom.xml b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
index 25373e7..0da049e 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/pom.xml
+++ b/ambari-metrics/ambari-metrics-timelineservice/pom.xml
@@ -22,11 +22,11 @@
<parent>
<artifactId>ambari-metrics</artifactId>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics-timelineservice</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Metrics Collector</name>
<packaging>jar</packaging>
@@ -338,7 +338,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-metrics-common</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-metrics/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/pom.xml b/ambari-metrics/pom.xml
index 835393a..8f9140e 100644
--- a/ambari-metrics/pom.xml
+++ b/ambari-metrics/pom.xml
@@ -19,7 +19,7 @@
<groupId>org.apache.ambari</groupId>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-metrics</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>pom</packaging>
<modules>
<module>../utility</module>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-project/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
index 91ddb45..c427858 100644
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -18,11 +18,11 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Apache Ambari Project POM</description>
<name>Apache Ambari Project POM</name>
<packaging>pom</packaging>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 3ff8218..2b93300 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -12,7 +12,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
@@ -20,7 +20,7 @@
<artifactId>ambari-server</artifactId>
<packaging>${packagingFormat}</packaging>
<name>Ambari Server</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Ambari Server</description>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-shell/ambari-groovy-shell/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-shell/ambari-groovy-shell/pom.xml b/ambari-shell/ambari-groovy-shell/pom.xml
index 36ee1bc..9967181 100644
--- a/ambari-shell/ambari-groovy-shell/pom.xml
+++ b/ambari-shell/ambari-groovy-shell/pom.xml
@@ -12,7 +12,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-shell</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../../ambari-shell</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
@@ -20,7 +20,7 @@
<artifactId>ambari-groovy-shell</artifactId>
<packaging>jar</packaging>
<name>Ambari Groovy Shell</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Ambari Groovy Shell</description>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
@@ -51,7 +51,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>groovy-client</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-shell/ambari-python-shell/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-shell/ambari-python-shell/pom.xml b/ambari-shell/ambari-python-shell/pom.xml
index 859a358..0c4baf6 100644
--- a/ambari-shell/ambari-python-shell/pom.xml
+++ b/ambari-shell/ambari-python-shell/pom.xml
@@ -19,14 +19,14 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-shell</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../../ambari-shell</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-python-shell</artifactId>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Python Shell</name>
<description>Ambari Python Shell</description>
<properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-shell/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-shell/pom.xml b/ambari-shell/pom.xml
index 88369e0..ccdead1 100644
--- a/ambari-shell/pom.xml
+++ b/ambari-shell/pom.xml
@@ -21,14 +21,14 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-shell</artifactId>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Shell</name>
<description>Ambari Shell</description>
<modules>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/calculator-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/calculator-view/pom.xml b/ambari-views/examples/calculator-view/pom.xml
index 41f74aa..101b11f 100644
--- a/ambari-views/examples/calculator-view/pom.xml
+++ b/ambari-views/examples/calculator-view/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>calculator-view</artifactId>
@@ -45,7 +45,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/cluster-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/cluster-view/pom.xml b/ambari-views/examples/cluster-view/pom.xml
index 1be217b..86c6799 100644
--- a/ambari-views/examples/cluster-view/pom.xml
+++ b/ambari-views/examples/cluster-view/pom.xml
@@ -19,11 +19,11 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>cluster-view</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>jar</packaging>
<name>Ambari Cluster View</name>
<url>http://maven.apache.org</url>
@@ -34,7 +34,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/favorite-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/favorite-view/pom.xml b/ambari-views/examples/favorite-view/pom.xml
index 2f9d7b1..54287ee 100644
--- a/ambari-views/examples/favorite-view/pom.xml
+++ b/ambari-views/examples/favorite-view/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>favorite-view</artifactId>
@@ -45,7 +45,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/hello-servlet-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/hello-servlet-view/pom.xml b/ambari-views/examples/hello-servlet-view/pom.xml
index 6063557..f8bc1e2 100644
--- a/ambari-views/examples/hello-servlet-view/pom.xml
+++ b/ambari-views/examples/hello-servlet-view/pom.xml
@@ -19,13 +19,13 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>hello-servlet-view</artifactId>
<packaging>jar</packaging>
<name>Ambari Hello Servlet View</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<url>http://maven.apache.org</url>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
@@ -46,7 +46,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/hello-spring-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/hello-spring-view/pom.xml b/ambari-views/examples/hello-spring-view/pom.xml
index ef1a5e0..f28bbe8 100644
--- a/ambari-views/examples/hello-spring-view/pom.xml
+++ b/ambari-views/examples/hello-spring-view/pom.xml
@@ -19,13 +19,13 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>hello-spring-view</artifactId>
<packaging>war</packaging>
<name>Ambari Hello Spring View</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<url>http://maven.apache.org</url>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
@@ -46,7 +46,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/helloworld-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/helloworld-view/pom.xml b/ambari-views/examples/helloworld-view/pom.xml
index 595f5e2..789e4c7 100644
--- a/ambari-views/examples/helloworld-view/pom.xml
+++ b/ambari-views/examples/helloworld-view/pom.xml
@@ -19,13 +19,13 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>helloworld-view</artifactId>
<packaging>jar</packaging>
<name>Ambari Hello World View</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<url>http://maven.apache.org</url>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
@@ -46,7 +46,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/phone-list-upgrade-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/phone-list-upgrade-view/pom.xml b/ambari-views/examples/phone-list-upgrade-view/pom.xml
index e8210a4..4dda4f3 100644
--- a/ambari-views/examples/phone-list-upgrade-view/pom.xml
+++ b/ambari-views/examples/phone-list-upgrade-view/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<version>2.1.0.0-SNAPSHOT</version>
<modelVersion>4.0.0</modelVersion>
@@ -51,7 +51,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
@@ -67,7 +67,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/phone-list-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/phone-list-view/pom.xml b/ambari-views/examples/phone-list-view/pom.xml
index e1993aa..5b674c4 100644
--- a/ambari-views/examples/phone-list-view/pom.xml
+++ b/ambari-views/examples/phone-list-view/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>phone-list-view</artifactId>
@@ -45,7 +45,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/pom.xml b/ambari-views/examples/pom.xml
index b2e606f..6dd5d94 100644
--- a/ambari-views/examples/pom.xml
+++ b/ambari-views/examples/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
@@ -27,7 +27,7 @@
<artifactId>ambari-view-examples</artifactId>
<packaging>pom</packaging>
<name>Ambari View Examples</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<modules>
<module>helloworld-view</module>
<module>hello-servlet-view</module>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/property-validator-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/property-validator-view/pom.xml b/ambari-views/examples/property-validator-view/pom.xml
index 7ed6612..5a0355e 100644
--- a/ambari-views/examples/property-validator-view/pom.xml
+++ b/ambari-views/examples/property-validator-view/pom.xml
@@ -19,11 +19,11 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>property-validator-view</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>jar</packaging>
<name>Ambari Property Validator View</name>
<url>http://maven.apache.org</url>
@@ -34,7 +34,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/property-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/property-view/pom.xml b/ambari-views/examples/property-view/pom.xml
index 7462e36..325fdb8 100644
--- a/ambari-views/examples/property-view/pom.xml
+++ b/ambari-views/examples/property-view/pom.xml
@@ -19,11 +19,11 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>property-view</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>jar</packaging>
<name>Ambari Property View</name>
<url>http://maven.apache.org</url>
@@ -34,7 +34,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/restricted-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/restricted-view/pom.xml b/ambari-views/examples/restricted-view/pom.xml
index 746ef90..1c56c14 100644
--- a/ambari-views/examples/restricted-view/pom.xml
+++ b/ambari-views/examples/restricted-view/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>restricted-view</artifactId>
@@ -45,7 +45,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/simple-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/simple-view/pom.xml b/ambari-views/examples/simple-view/pom.xml
index aed9a3e..9a4c7ed 100644
--- a/ambari-views/examples/simple-view/pom.xml
+++ b/ambari-views/examples/simple-view/pom.xml
@@ -19,11 +19,11 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>simple-view</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<packaging>jar</packaging>
<name>Ambari Simple View</name>
<url>http://maven.apache.org</url>
@@ -34,7 +34,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/examples/weather-view/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/examples/weather-view/pom.xml b/ambari-views/examples/weather-view/pom.xml
index fd85eb6..f3622ca 100644
--- a/ambari-views/examples/weather-view/pom.xml
+++ b/ambari-views/examples/weather-view/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-view-examples</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>weather-view</artifactId>
@@ -45,7 +45,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-views/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/pom.xml b/ambari-views/pom.xml
index b3c143a..7b10076 100644
--- a/ambari-views/pom.xml
+++ b/ambari-views/pom.xml
@@ -20,7 +20,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
@@ -28,7 +28,7 @@
<artifactId>ambari-views</artifactId>
<packaging>jar</packaging>
<name>Ambari Views</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Ambari View interfaces.</description>
<dependencies>
<dependency>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/ambari-web/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-web/pom.xml b/ambari-web/pom.xml
index acf908b..7d1eebe 100644
--- a/ambari-web/pom.xml
+++ b/ambari-web/pom.xml
@@ -20,14 +20,14 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-web</artifactId>
<packaging>pom</packaging>
<name>Ambari Web</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Ambari Web</description>
<properties>
<ambari.dir>${project.parent.parent.basedir}</ambari.dir>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/ambari-log4j/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/ambari-log4j/pom.xml b/contrib/ambari-log4j/pom.xml
index 5bda1ed..553e2c2 100644
--- a/contrib/ambari-log4j/pom.xml
+++ b/contrib/ambari-log4j/pom.xml
@@ -20,7 +20,7 @@
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-log4j</artifactId>
<packaging>jar</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>ambari-log4j</name>
<url>http://maven.apache.org</url>
<repositories>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/ambari-scom/ambari-scom-server/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/ambari-scom/ambari-scom-server/pom.xml b/contrib/ambari-scom/ambari-scom-server/pom.xml
index 8e814c1..109350d 100644
--- a/contrib/ambari-scom/ambari-scom-server/pom.xml
+++ b/contrib/ambari-scom/ambari-scom-server/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-scom-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-scom-server</artifactId>
@@ -27,7 +27,7 @@
<name>Ambari SCOM Server</name>
<url>http://maven.apache.org</url>
<properties>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
<powermock.version>1.5.4</powermock.version>
</properties>
<dependencies>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/ambari-scom/metrics-sink/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/ambari-scom/metrics-sink/pom.xml b/contrib/ambari-scom/metrics-sink/pom.xml
index e27245c..fd6a042 100644
--- a/contrib/ambari-scom/metrics-sink/pom.xml
+++ b/contrib/ambari-scom/metrics-sink/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-scom-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>metrics-sink</artifactId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/ambari-scom/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/ambari-scom/pom.xml b/contrib/ambari-scom/pom.xml
index 784523c..45200a0 100644
--- a/contrib/ambari-scom/pom.xml
+++ b/contrib/ambari-scom/pom.xml
@@ -20,7 +20,7 @@
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-scom-project</artifactId>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari SCOM Project</name>
<modules>
<module>ambari-scom-server</module>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/management-packs/microsoft-r_mpack/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/microsoft-r_mpack/pom.xml b/contrib/management-packs/microsoft-r_mpack/pom.xml
index a194128..f785801 100644
--- a/contrib/management-packs/microsoft-r_mpack/pom.xml
+++ b/contrib/management-packs/microsoft-r_mpack/pom.xml
@@ -34,7 +34,7 @@
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
</properties>
<build>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/management-packs/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/pom.xml b/contrib/management-packs/pom.xml
index f6787f0..e8c4f0f 100644
--- a/contrib/management-packs/pom.xml
+++ b/contrib/management-packs/pom.xml
@@ -29,7 +29,7 @@
<version>2.0.0.0-SNAPSHOT</version>
<name>Ambari Contrib Management Packs</name>
<properties>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
<ambari.dir>${project.parent.parent.basedir}</ambari.dir>
<hadoop.version>2.7.1</hadoop.version>
</properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/ambari-views-package/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/ambari-views-package/pom.xml b/contrib/views/ambari-views-package/pom.xml
index 150e729..803462e 100644
--- a/contrib/views/ambari-views-package/pom.xml
+++ b/contrib/views/ambari-views-package/pom.xml
@@ -21,12 +21,12 @@
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-package</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Views Package</name>
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<description>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/capacity-scheduler/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/pom.xml b/contrib/views/capacity-scheduler/pom.xml
index b44c3eb..286a3ec 100644
--- a/contrib/views/capacity-scheduler/pom.xml
+++ b/contrib/views/capacity-scheduler/pom.xml
@@ -25,7 +25,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
@@ -81,7 +81,7 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>org.easymock</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/commons/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/commons/pom.xml b/contrib/views/commons/pom.xml
index c417f0f..c72c458 100644
--- a/contrib/views/commons/pom.xml
+++ b/contrib/views/commons/pom.xml
@@ -22,20 +22,20 @@
<modelVersion>4.0.0</modelVersion>
<artifactId>ambari-views-commons</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari View Commons</name>
<parent>
<artifactId>ambari-contrib-views</artifactId>
<groupId>org.apache.ambari.contrib.views</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/files/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/files/pom.xml b/contrib/views/files/pom.xml
index 61a5192..625efc3 100644
--- a/contrib/views/files/pom.xml
+++ b/contrib/views/files/pom.xml
@@ -25,7 +25,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
<dependency>
@@ -120,12 +120,12 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-commons</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/hawq/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/hawq/pom.xml b/contrib/views/hawq/pom.xml
index 929f6de..8be0332 100644
--- a/contrib/views/hawq/pom.xml
+++ b/contrib/views/hawq/pom.xml
@@ -25,7 +25,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
@@ -93,7 +93,7 @@
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
<ui.dir>${basedir}/src/main/resources/ui</ui.dir>
<hawq-version>1.0.0</hawq-version>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
<executable.node>node/node</executable.node>
<executable.npm>npm</executable.npm>
<args.npm></args.npm>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/hive-next/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/hive-next/pom.xml b/contrib/views/hive-next/pom.xml
index 032dfc1..9f9ca45 100644
--- a/contrib/views/hive-next/pom.xml
+++ b/contrib/views/hive-next/pom.xml
@@ -25,7 +25,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
@@ -201,7 +201,7 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>commons-validator</groupId>
@@ -226,14 +226,14 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-commons</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
</dependencies>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
<hive-version>2.1.0</hive-version>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
</properties>
<build>
<plugins>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/hive20/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/pom.xml b/contrib/views/hive20/pom.xml
index 1a7d421..157252b 100644
--- a/contrib/views/hive20/pom.xml
+++ b/contrib/views/hive20/pom.xml
@@ -25,7 +25,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
@@ -202,7 +202,7 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>commons-validator</groupId>
@@ -227,7 +227,7 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-commons</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
@@ -247,7 +247,7 @@
<properties>
<ambari.dir>../../..</ambari.dir>
<hive-version>2.1.0</hive-version>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
</properties>
<build>
<plugins>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/hueambarimigration/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/hueambarimigration/pom.xml b/contrib/views/hueambarimigration/pom.xml
index 72d11a2..e8a3c71 100644
--- a/contrib/views/hueambarimigration/pom.xml
+++ b/contrib/views/hueambarimigration/pom.xml
@@ -25,7 +25,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<properties>
@@ -51,7 +51,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/jobs/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/jobs/pom.xml b/contrib/views/jobs/pom.xml
index 522c195..89e5217 100644
--- a/contrib/views/jobs/pom.xml
+++ b/contrib/views/jobs/pom.xml
@@ -19,14 +19,14 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>jobs</artifactId>
<packaging>jar</packaging>
<name>Jobs</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Jobs View</description>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/pig/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/pig/pom.xml b/contrib/views/pig/pom.xml
index f41f98c..29ed5a5 100644
--- a/contrib/views/pig/pom.xml
+++ b/contrib/views/pig/pom.xml
@@ -25,7 +25,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
@@ -138,18 +138,18 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-commons</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
</dependencies>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
<ui.directory>${basedir}/src/main/resources/ui/pig-web</ui.directory>
</properties>
<build>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/pom.xml b/contrib/views/pom.xml
index fa85854..882cb2c 100644
--- a/contrib/views/pom.xml
+++ b/contrib/views/pom.xml
@@ -19,17 +19,17 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../../ambari-project</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
<packaging>pom</packaging>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari Contrib Views</name>
<properties>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
<ambari.dir>${project.parent.parent.basedir}</ambari.dir>
<hadoop.version>2.7.3</hadoop.version>
<views.jars.dir>views-jars</views.jars.dir>
@@ -217,7 +217,7 @@
<dependency>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
</dependencies>
</dependencyManagement>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/slider/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/slider/pom.xml b/contrib/views/slider/pom.xml
index 942f99e..f37018a 100644
--- a/contrib/views/slider/pom.xml
+++ b/contrib/views/slider/pom.xml
@@ -13,13 +13,13 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>slider</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Slider</name>
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
@@ -135,7 +135,7 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
</dependencies>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/storm/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/storm/pom.xml b/contrib/views/storm/pom.xml
index cf0e303..4bd490e 100644
--- a/contrib/views/storm/pom.xml
+++ b/contrib/views/storm/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/tez/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/tez/pom.xml b/contrib/views/tez/pom.xml
index 6e71150..4cd9ad3 100644
--- a/contrib/views/tez/pom.xml
+++ b/contrib/views/tez/pom.xml
@@ -19,7 +19,7 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<properties>
<ambari.dir>${project.parent.parent.parent.basedir}</ambari.dir>
@@ -276,7 +276,7 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
</dependencies>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/utils/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/utils/pom.xml b/contrib/views/utils/pom.xml
index 884300b..2e3db32 100644
--- a/contrib/views/utils/pom.xml
+++ b/contrib/views/utils/pom.xml
@@ -19,13 +19,13 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<name>Ambari View Utils</name>
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/contrib/views/wfmanager/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/pom.xml b/contrib/views/wfmanager/pom.xml
index 627f437..a84535e 100644
--- a/contrib/views/wfmanager/pom.xml
+++ b/contrib/views/wfmanager/pom.xml
@@ -19,14 +19,14 @@
<parent>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-contrib-views</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-utils</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>org.apache.ambari</groupId>
@@ -36,7 +36,7 @@
<dependency>
<groupId>org.apache.ambari.contrib.views</groupId>
<artifactId>ambari-views-commons</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/docs/pom.xml
----------------------------------------------------------------------
diff --git a/docs/pom.xml b/docs/pom.xml
index 321f6f3..35d4cfb 100644
--- a/docs/pom.xml
+++ b/docs/pom.xml
@@ -28,7 +28,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.ambari</groupId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<artifactId>ambari</artifactId>
<packaging>pom</packaging>
@@ -42,7 +42,7 @@
<package.pid.dir>/var/run/ambari</package.pid.dir>
<package.release>1</package.release>
<package.type>tar.gz</package.type>
- <ambari.version>2.5.1.0.0</ambari.version>
+ <ambari.version>2.5.2.0.0</ambari.version>
<final.name>${project.artifactId}-${ambari.version}</final.name>
</properties>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e7d75e4..5dbd689 100644
--- a/pom.xml
+++ b/pom.xml
@@ -21,7 +21,7 @@
<artifactId>ambari</artifactId>
<packaging>pom</packaging>
<name>Ambari Main</name>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<description>Ambari</description>
<url>http://ambari.apache.org/</url>
<scm>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bf66bf8e/utility/pom.xml
----------------------------------------------------------------------
diff --git a/utility/pom.xml b/utility/pom.xml
index bc8b353..f130fbb 100644
--- a/utility/pom.xml
+++ b/utility/pom.xml
@@ -23,7 +23,7 @@
<parent>
<groupId>org.apache.ambari</groupId>
<artifactId>ambari-project</artifactId>
- <version>2.5.1.0.0</version>
+ <version>2.5.2.0.0</version>
<relativePath>../ambari-project</relativePath>
</parent>
[40/50] [abbrv] ambari git commit: AMBARI-21794. Post Ambari upgrade
on a migration cluster Oozie failed to restart when LZO compression is
enabled. Addendum fix. (swagle)
Posted by ja...@apache.org.
AMBARI-21794. Post Ambari upgrade on a migration cluster Oozie failed to restart when LZO compression is enabled. Addendum fix. (swagle)
Change-Id: Iccf24d0fbd1cf993f16fa01cfbf161aae44629e4
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f5dc535f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f5dc535f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f5dc535f
Branch: refs/heads/2.5-maint
Commit: f5dc535f55c5fb7e1dfff77f18135e1d9beddade
Parents: df44822
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Aug 23 14:59:28 2017 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Aug 23 14:59:28 2017 -0700
----------------------------------------------------------------------
.../BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5dc535f/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py
index aa5bc30..8bdfe46 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py
@@ -309,7 +309,7 @@ def oozie_server_specific(upgrade_type):
Package(params.all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
- Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
+ Execute(format('{sudo} cp /usr/lib/hadoop-lzo/lib/hadoop-lzo*.jar {oozie_lib_dir}'),
not_if = no_op_test,
)
[45/50] [abbrv] ambari git commit: AMBARI-21788. Ensure
topology_mappings.data has 0644 permissions (aonishuk)
Posted by ja...@apache.org.
AMBARI-21788. Ensure topology_mappings.data has 0644 permissions (aonishuk)
Change-Id: I2c85d8f4ca5a88e3f0bbf46d357bdd0dcd7adc10
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f2f9afbc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f2f9afbc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f2f9afbc
Branch: refs/heads/2.5-maint
Commit: f2f9afbc75ff5f39534576b65843d806e6d6cdae
Parents: 2bf87b8
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Aug 25 15:08:53 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Aug 25 15:08:53 2017 +0300
----------------------------------------------------------------------
.../HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py | 1 +
.../stacks/2.0.6/hooks/before-START/test_before_start.py | 6 +++++-
2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2f9afbc/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py
index 548f051..48158bb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py
@@ -30,6 +30,7 @@ def create_topology_mapping():
content=Template("topology_mappings.data.j2"),
owner=params.hdfs_user,
group=params.user_group,
+ mode=0644,
only_if=format("test -d {net_topology_script_dir}"))
def create_topology_script():
http://git-wip-us.apache.org/repos/asf/ambari/blob/f2f9afbc/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
index 4f62ace..510dc41 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
@@ -92,6 +92,7 @@ class TestHookBeforeStart(RMFTestCase):
self.assertResourceCalled('File', '/etc/hadoop/conf/topology_mappings.data',
owner = 'hdfs',
content = Template('topology_mappings.data.j2'),
+ mode = 0644,
group = 'hadoop',
only_if = 'test -d /etc/hadoop/conf',
)
@@ -167,6 +168,7 @@ class TestHookBeforeStart(RMFTestCase):
owner = 'hdfs',
content = Template('topology_mappings.data.j2'),
group = 'hadoop',
+ mode = 0644,
only_if = 'test -d /etc/hadoop/conf',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/topology_script.py',
@@ -246,6 +248,7 @@ class TestHookBeforeStart(RMFTestCase):
owner = 'hdfs',
content = Template('topology_mappings.data.j2'),
group = 'hadoop',
+ mode = 0644,
only_if = 'test -d /etc/hadoop/conf',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/topology_script.py',
@@ -256,7 +259,7 @@ class TestHookBeforeStart(RMFTestCase):
self.assertNoMoreResources()
def test_hook_refresh_topology_custom_directories(self):
- config_file = "stacks/2.0.6/configs/default.json"
+ config_file = "{0}/test/python/stacks/2.0.6/configs/default.json".format(self.get_src_folder())
with open(config_file, "r") as f:
default_json = json.load(f)
@@ -327,6 +330,7 @@ class TestHookBeforeStart(RMFTestCase):
owner = 'hdfs',
content = Template('topology_mappings.data.j2'),
group = 'hadoop',
+ mode = 0644,
only_if = 'test -d /etc/hadoop/conf',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/topology_script.py',
[19/50] [abbrv] ambari git commit: AMBARI-21729 Owner of yarn.include
stays to be root:root where as dfs.include gets changed to the correct one
upon restart services (dsen)
Posted by ja...@apache.org.
AMBARI-21729 Owner of yarn.include stays to be root:root where as dfs.include gets changed to the correct one upon restart services (dsen)
(cherry picked from commit 640fefb21cc73b7ccbd4dc714c0415dcbb311700)
Change-Id: I102293ceaf8e9815d3a5efb7222449835f64ee52
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6d2bab16
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6d2bab16
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6d2bab16
Branch: refs/heads/2.5-maint
Commit: 6d2bab1615598753b642e35ae2f1148641eac16b
Parents: fd3379f
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed Aug 16 12:35:45 2017 +0300
Committer: Zuul <re...@hortonworks.com>
Committed: Wed Aug 16 11:31:41 2017 -0700
----------------------------------------------------------------------
.../2.1.0.2.0/package/scripts/params_linux.py | 5 +-
.../YARN/2.1.0.2.0/package/scripts/yarn.py | 14 +-
.../stacks/2.0.6/YARN/test_resourcemanager.py | 160 ++-
.../default_yarn_include_file_dont_manage.json | 1260 ++++++++++++++++++
.../default_yarn_include_file_manage.json | 1260 ++++++++++++++++++
.../secured_yarn_include_file_dont_manage.json | 1078 +++++++++++++++
.../secured_yarn_include_file_manage.json | 1078 +++++++++++++++
.../YARN/package/scripts/params_linux.py | 5 +-
.../2.0/services/YARN/package/scripts/yarn.py | 16 +-
9 files changed, 4865 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index fad1fcb..f217192 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -169,9 +169,6 @@ rm_hosts = config['clusterHostInfo']['rm_host']
rm_host = rm_hosts[0]
rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
-# TODO UPGRADE default, update site during upgrade
-rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
@@ -234,6 +231,7 @@ user_group = config['configurations']['cluster-env']['user_group']
#exclude file
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+rm_nodes_exclude_dir = os.path.dirname(exclude_file_path)
nm_hosts = default("/clusterHostInfo/nm_hosts", [])
#incude file
@@ -241,6 +239,7 @@ include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.node
include_hosts = None
manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
if include_file_path and manage_include_files:
+ rm_nodes_include_dir = os.path.dirname(include_file_path)
include_hosts = list(set(nm_hosts) - set(exclude_hosts))
ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
index 3300875..f6e8391 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
@@ -296,10 +296,22 @@ def yarn(name=None, config_dir=None):
create_parents=True,
cd_access='a',
)
- File(params.rm_nodes_exclude_path,
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
owner=params.yarn_user,
group=params.user_group
)
+ if params.include_hosts:
+ Directory(params.rm_nodes_include_dir,
+ mode=0755,
+ create_parents=True,
+ cd_access='a',
+ )
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.yarn_user,
+ group=params.user_group
+ )
File(params.yarn_job_summary_log,
owner=params.yarn_user,
group=params.user_group
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index e8b5f78..01b5282 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -48,6 +48,28 @@ class TestResourceManager(RMFTestCase):
self.assert_configure_default()
self.assertNoMoreResources()
+ def test_configure_default_with_include_file_dont_manage(self):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname="Resourcemanager",
+ command="configure",
+ config_file="default_yarn_include_file_dont_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assert_configure_default(is_include_file_configured=True, manage_include_files=False)
+ self.assertNoMoreResources()
+
+ def test_configure_default_with_include_file_manage(self):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname="Resourcemanager",
+ command="configure",
+ config_file="default_yarn_include_file_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assert_configure_default(is_include_file_configured=True, manage_include_files=True)
+ self.assertNoMoreResources()
+
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
@@ -98,6 +120,28 @@ class TestResourceManager(RMFTestCase):
)
self.assert_configure_secured()
+ def test_configure_secured_with_include_file_dont_manage(self):
+
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname="Resourcemanager",
+ command="configure",
+ config_file="secured_yarn_include_file_dont_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assert_configure_secured(is_include_file_configured=True, manage_include_files=False)
+
+ def test_configure_secured_with_include_file_manage(self):
+
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname="Resourcemanager",
+ command="configure",
+ config_file="secured_yarn_include_file_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assert_configure_secured(is_include_file_configured=True, manage_include_files=True)
+
def test_start_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
@@ -177,7 +221,95 @@ class TestResourceManager(RMFTestCase):
self.assertNoMoreResources()
- def assert_configure_default(self):
+ def test_decommission_default_with_include_file_dont_manage(self):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname = "Resourcemanager",
+ command = "decommission",
+ config_file="default_yarn_include_file_dont_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+ owner = 'yarn',
+ content = Template('exclude_hosts_list.j2'),
+ group = 'hadoop',
+ )
+ self.assertResourceCalled('Execute', ' yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
+ environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
+ user = 'yarn',
+ )
+ self.assertNoMoreResources()
+
+ def test_decommission_default_with_include_file_manage(self):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname = "Resourcemanager",
+ command = "decommission",
+ config_file="default_yarn_include_file_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+ owner = 'yarn',
+ content = Template('exclude_hosts_list.j2'),
+ group = 'hadoop',
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
+ owner = 'yarn',
+ content = Template('include_hosts_list.j2'),
+ group = 'hadoop',
+ )
+ self.assertResourceCalled('Execute', ' yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
+ environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
+ user = 'yarn',
+ )
+ self.assertNoMoreResources()
+
+ def test_decommission_secured_with_include_file_dont_manage(self):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname = "Resourcemanager",
+ command = "decommission",
+ config_file="secured_yarn_include_file_dont_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+ owner = 'yarn',
+ content = Template('exclude_hosts_list.j2'),
+ group = 'hadoop',
+ )
+ self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/rm.service.keytab rm/c6401.ambari.apache.org@EXAMPLE.COM; yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
+ environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
+ user = 'yarn',
+ )
+
+ self.assertNoMoreResources()
+
+ def test_decommission_secured_with_include_file_manage(self):
+ self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
+ classname = "Resourcemanager",
+ command = "decommission",
+ config_file="secured_yarn_include_file_manage.json",
+ stack_version = self.STACK_VERSION,
+ target = RMFTestCase.TARGET_COMMON_SERVICES
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+ owner = 'yarn',
+ content = Template('exclude_hosts_list.j2'),
+ group = 'hadoop',
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
+ owner = 'yarn',
+ content = Template('include_hosts_list.j2'),
+ group = 'hadoop',
+ )
+ self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/rm.service.keytab rm/c6401.ambari.apache.org@EXAMPLE.COM; yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
+ environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
+ user = 'yarn',
+ )
+
+ self.assertNoMoreResources()
+
+ def assert_configure_default(self, is_include_file_configured = False, manage_include_files=False):
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
@@ -274,9 +406,21 @@ class TestResourceManager(RMFTestCase):
cd_access = 'a',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+ content = Template('exclude_hosts_list.j2'),
owner = 'yarn',
group = 'hadoop',
)
+ if is_include_file_configured and manage_include_files:
+ self.assertResourceCalled('Directory', '/etc/hadoop/conf_for_include',
+ mode = 0755,
+ create_parents = True,
+ cd_access = 'a',
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
+ content = Template('include_hosts_list.j2'),
+ owner = 'yarn',
+ group = 'hadoop',
+ )
self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
owner = 'yarn',
group = 'hadoop',
@@ -346,7 +490,7 @@ class TestResourceManager(RMFTestCase):
group = 'hadoop',
)
- def assert_configure_secured(self):
+ def assert_configure_secured(self, is_include_file_configured = False, manage_include_files=False):
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
@@ -443,9 +587,21 @@ class TestResourceManager(RMFTestCase):
cd_access = 'a',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
+ content = Template('exclude_hosts_list.j2'),
owner = 'yarn',
group = 'hadoop',
)
+ if is_include_file_configured and manage_include_files:
+ self.assertResourceCalled('Directory', '/etc/hadoop/conf_for_include',
+ mode = 0755,
+ create_parents = True,
+ cd_access = 'a',
+ )
+ self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
+ content = Template('include_hosts_list.j2'),
+ owner = 'yarn',
+ group = 'hadoop',
+ )
self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
owner = 'yarn',
group = 'hadoop',
[42/50] [abbrv] ambari git commit: AMBARI-21798. Oozie server crashes
post migration after regen kerberos keytabs and restart all services (rlevas)
Posted by ja...@apache.org.
AMBARI-21798. Oozie server crashes post migration after regen kerberos keytabs and restart all services (rlevas)
(cherry picked from commit 2eed29264cb06084bd4a4348e71421d419b22716)
Change-Id: Iec613a193d407e5f456011306b526758c37fb74e
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4b711613
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4b711613
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4b711613
Branch: refs/heads/2.5-maint
Commit: 4b711613c60000839822796a83891efcfc35f6d5
Parents: b41f06f
Author: Robert Levas <rl...@hortonworks.com>
Authored: Thu Aug 24 10:25:45 2017 -0400
Committer: rlevas@hortonworks.com <je...@hortonworks.com>
Committed: Thu Aug 24 14:51:19 2017 +0000
----------------------------------------------------------------------
.../server/upgrade/UpgradeCatalog252.java | 86 ++++++++++++++++----
.../server/upgrade/UpgradeCatalog252Test.java | 57 +++++++++++++
2 files changed, 128 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4b711613/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 0b422f8..718679b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -44,9 +44,12 @@ import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptorContainer;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import com.google.common.collect.Sets;
@@ -352,15 +355,52 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
if (kerberosDescriptor != null) {
+ boolean updated = false;
+
// Find and remove configuration specifications for <code>livy-conf/livy.superusers</code>
// in SPARK since this logic has been moved to the relevant stack/service advisors
- boolean updatedSpark = removeConfigurationSpecification(kerberosDescriptor.getService("SPARK"), "livy-conf", "livy.superusers");
+ if(removeConfigurationSpecifications(kerberosDescriptor.getService("SPARK"),
+ Collections.<String, Collection<String>>singletonMap("livy-conf", Collections.singleton("livy.superusers")))) {
+ updated = true;
+ }
// Find and remove configuration specifications for <code>livy-conf2/livy.superusers</code>
// in SPARK2 since this logic has been moved to the relevant stack/service advisors
- boolean updatedSpark2 = removeConfigurationSpecification(kerberosDescriptor.getService("SPARK2"), "livy2-conf", "livy.superusers");
+ if(removeConfigurationSpecifications(kerberosDescriptor.getService("SPARK2"),
+ Collections.<String, Collection<String>>singletonMap("livy2-conf", Collections.singleton("livy.superusers")))) {
+ updated = true;
+ }
- if (updatedSpark || updatedSpark2) {
+ // Find and remove configuration specifications for the following configurations in KNOX/KNOX_GATEWAY
+ // since they are invalid due to static "knox" embedded in the property name:
+ // * oozie-site/oozie.service.ProxyUserService.proxyuser.knox.groups
+ // * oozie-site/oozie.service.ProxyUserService.proxyuser.knox.hosts
+ // * webhcat-site/webhcat.proxyuser.knox.groups
+ // * webhcat-site/webhcat.proxyuser.knox.hosts
+ // * core-site/hadoop.proxyuser.knox.groups
+ // * core-site/hadoop.proxyuser.knox.hosts
+ // * falcon-runtime.properties/*.falcon.service.ProxyUserService.proxyuser.knox.groups
+ // * falcon-runtime.properties/*.falcon.service.ProxyUserService.proxyuser.knox.hosts
+ KerberosServiceDescriptor knoxKerberosDescriptor = kerberosDescriptor.getService("KNOX");
+ if(knoxKerberosDescriptor != null) {
+ KerberosComponentDescriptor knoxGatewayKerberosDescriptor = knoxKerberosDescriptor.getComponent("KNOX_GATEWAY");
+ if (knoxGatewayKerberosDescriptor != null) {
+ Map<String, Collection<String>> configsToRemove = new HashMap<>();
+ configsToRemove.put("oozie-site",
+ Arrays.asList("oozie.service.ProxyUserService.proxyuser.knox.groups", "oozie.service.ProxyUserService.proxyuser.knox.hosts"));
+ configsToRemove.put("webhcat-site",
+ Arrays.asList("webhcat.proxyuser.knox.groups", "webhcat.proxyuser.knox.hosts"));
+ configsToRemove.put("core-site",
+ Arrays.asList("hadoop.proxyuser.knox.groups", "hadoop.proxyuser.knox.hosts"));
+ configsToRemove.put("falcon-runtime.properties",
+ Arrays.asList("*.falcon.service.ProxyUserService.proxyuser.knox.groups", "*.falcon.service.ProxyUserService.proxyuser.knox.hosts"));
+ if (removeConfigurationSpecifications(knoxGatewayKerberosDescriptor, configsToRemove)) {
+ updated = true;
+ }
+ }
+ }
+
+ if (updated) {
artifactEntity.setArtifactData(kerberosDescriptor.toMap());
artifactDAO.merge(artifactEntity);
}
@@ -470,24 +510,40 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
}
/**
- * Given an {@link AbstractKerberosDescriptorContainer}, attempts to remove the specified property
- * (<code>configType/propertyName</code> from it.
+ * Given an {@link AbstractKerberosDescriptorContainer}, attempts to remove the specified
+ * configurations (<code>configType/propertyName</code>) from it.
*
* @param kerberosDescriptorContainer the container to update
- * @param configType the configuration type
- * @param propertyName the property name
+ * @param configurations a map of configuration types to sets of property names.
* @return true if changes were made to the container; false otherwise
*/
- private boolean removeConfigurationSpecification(AbstractKerberosDescriptorContainer kerberosDescriptorContainer, String configType, String propertyName) {
+ private boolean removeConfigurationSpecifications(AbstractKerberosDescriptorContainer kerberosDescriptorContainer, Map<String, Collection<String>> configurations) {
boolean updated = false;
if (kerberosDescriptorContainer != null) {
- KerberosConfigurationDescriptor configurationDescriptor = kerberosDescriptorContainer.getConfiguration(configType);
- if (configurationDescriptor != null) {
- Map<String, String> properties = configurationDescriptor.getProperties();
- if ((properties != null) && properties.containsKey(propertyName)) {
- properties.remove(propertyName);
- LOG.info("Removed {}/{} from the descriptor named {}", configType, propertyName, kerberosDescriptorContainer.getName());
- updated = true;
+ if (!MapUtils.isEmpty(configurations)) {
+ for (Map.Entry<String, Collection<String>> entry : configurations.entrySet()) {
+ String configType = entry.getKey();
+
+ for (String propertyName : entry.getValue()) {
+ Map<String, KerberosConfigurationDescriptor> configurationDescriptors = kerberosDescriptorContainer.getConfigurations(false);
+ KerberosConfigurationDescriptor configurationDescriptor = (configurationDescriptors == null)
+ ? null
+ : configurationDescriptors.get(configType);
+ if (configurationDescriptor != null) {
+ Map<String, String> properties = configurationDescriptor.getProperties();
+ if ((properties != null) && properties.containsKey(propertyName)) {
+ properties.remove(propertyName);
+ LOG.info("Removed {}/{} from the descriptor named {}", configType, propertyName, kerberosDescriptorContainer.getName());
+ updated = true;
+
+ // If there are no more properties in the configurationDescriptor, remove it from the container.
+ if(properties.isEmpty()) {
+ configurationDescriptors.remove(configType);
+ kerberosDescriptorContainer.setConfigurations(configurationDescriptors);
+ }
+ }
+ }
+ }
}
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4b711613/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
index 72956eb..e6dbb7c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
@@ -54,6 +54,7 @@ import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -289,6 +290,48 @@ public class UpgradeCatalog252Test {
" ]" +
" }," +
" {" +
+ " \"name\": \"KNOX\"," +
+ " \"components\": [" +
+ " {" +
+ " \"name\": \"KNOX_GATEWAY\"," +
+ " \"configurations\": [" +
+ " {" +
+ " \"core-site\": {" +
+ " \"property1\": \"true\"," +
+ " \"property2\": \"true\"," +
+ " \"hadoop.proxyuser.knox.groups\": \"somevalue\"," +
+ " \"hadoop.proxyuser.knox.hosts\": \"somevalue\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"webhcat-site\": {" +
+ " \"webhcat.proxyuser.knox.groups\": \"somevalue\"," +
+ " \"webhcat.proxyuser.knox.hosts\": \"somevalue\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"oozie-site\": {" +
+ " \"oozie.service.ProxyUserService.proxyuser.knox.groups\": \"somevalue\"," +
+ " \"oozie.service.ProxyUserService.proxyuser.knox.hosts\": \"somevalue\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"falcon-runtime.properties\": {" +
+ " \"*.falcon.service.ProxyUserService.proxyuser.knox.groups\": \"somevalue\"," +
+ " \"*.falcon.service.ProxyUserService.proxyuser.knox.hosts\": \"somevalue\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"some-env\": {" +
+ " \"groups\": \"${hadoop-env/proxyuser_group}\"," +
+ " \"hosts\": \"${clusterHostInfo/existing_service_master_hosts}\"" +
+ " }" +
+ " }" +
+ " ]" +
+ " }" +
+ " ]" +
+ " }," +
+ " {" +
" \"name\": \"NOT_SPARK\"," +
" \"configurations\": [" +
" {" +
@@ -360,5 +403,19 @@ public class UpgradeCatalog252Test {
Assert.assertNotNull(result.getService("NOT_SPARK").getConfiguration("not-livy-conf"));
Assert.assertNotNull(result.getService("NOT_SPARK").getConfiguration("not-livy-conf").getProperties());
Assert.assertTrue(result.getService("NOT_SPARK").getConfiguration("not-livy-conf").getProperties().containsKey("livy.superusers"));
+
+ Assert.assertNotNull(result.getService("KNOX"));
+
+ KerberosComponentDescriptor knoxGateway = result.getService("KNOX").getComponent("KNOX_GATEWAY");
+ Assert.assertNotNull(knoxGateway);
+ Assert.assertNotNull(knoxGateway.getConfiguration("core-site"));
+ Assert.assertNotNull(knoxGateway.getConfiguration("core-site").getProperties());
+ Assert.assertTrue(knoxGateway.getConfiguration("core-site").getProperties().containsKey("property1"));
+ Assert.assertFalse(knoxGateway.getConfiguration("core-site").getProperties().containsKey("hadoop.proxyuser.knox.groups"));
+ Assert.assertFalse(knoxGateway.getConfiguration("core-site").getProperties().containsKey("hadoop.proxyuser.knox.hosts"));
+ Assert.assertNull(knoxGateway.getConfiguration("oozie-site"));
+ Assert.assertNull(knoxGateway.getConfiguration("webhcat-site"));
+ Assert.assertNull(knoxGateway.getConfiguration("falcon-runtime.properties"));
+ Assert.assertNotNull(knoxGateway.getConfiguration("some-env"));
}
}
[03/50] [abbrv] ambari git commit: AMBARI-21664. HDFS namenode rpc
and connection load metrics are not showing. Addendum. (swagle)
Posted by ja...@apache.org.
AMBARI-21664. HDFS namenode rpc and connection load metrics are not showing. Addendum. (swagle)
(cherry picked from commit f153268797c3d448a741ceef5dd92dd8e3d7cb48)
Change-Id: I45d693ad09708cfb7ab782ff16e583bb185f07f0
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/117f4f39
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/117f4f39
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/117f4f39
Branch: refs/heads/2.5-maint
Commit: 117f4f39d629e56dd895f1d521cefe2f35ac8617
Parents: 7f01766
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Aug 10 13:47:13 2017 -0700
Committer: swagle@hortonworks.com <je...@hortonworks.com>
Committed: Thu Aug 10 21:01:39 2017 +0000
----------------------------------------------------------------------
.../stacks/HDP/2.0.6/hooks/before-START/scripts/params.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/117f4f39/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index bd86b50..5554941 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -331,7 +331,7 @@ if dfs_ha_enabled:
namenode_rpc = nn_host
pass
pass
-elif 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+elif 'hdfs-site' in config['configurations'] and 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
else:
namenode_rpc = default('/configurations/core-site/fs.defaultFS', None)
@@ -341,7 +341,7 @@ if namenode_rpc:
try:
nn_rpc_client_port = int(port_str)
except ValueError:
- nn_rpc_client_port = None
+ pass
if dfs_ha_enabled:
dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
[18/50] [abbrv] ambari git commit: AMBARI-21729 Owner of yarn.include
stays to be root:root where as dfs.include gets changed to the correct one
upon restart services (dsen)
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_dont_manage.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_dont_manage.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_dont_manage.json
new file mode 100644
index 0000000..ac3f86d
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_dont_manage.json
@@ -0,0 +1,1260 @@
+{
+ "localComponents": [
+ "NAMENODE",
+ "SECONDARY_NAMENODE",
+ "ZOOKEEPER_SERVER",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_CLIENT",
+ "RESOURCEMANAGER",
+ "HISTORYSERVER",
+ "NODEMANAGER",
+ "YARN_CLIENT",
+ "MAPREDUCE2_CLIENT",
+ "SLIDER",
+ "PIG",
+ "SQOOP",
+ "HIVE_CLIENT",
+ "TEZ_CLIENT",
+ "HIVE_CLIENT",
+ "SPARK_CLIENT",
+ "SPARK2_CLIENT"
+ ],
+ "roleCommand": "SERVICE_CHECK",
+ "clusterName": "c1",
+ "hostname": "c6401.ambari.apache.org",
+ "hostLevelParams": {
+ "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+ "agent_stack_retry_count": "5",
+ "agent_stack_retry_on_unavailability": "false",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "ambari_db_rca_password": "mapred",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+ "stack_version": "2.0",
+ "stack_name": "HDP",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+ "ambari_db_rca_username": "mapred",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "java_version": "8",
+ "db_name": "ambari",
+ "user_group": "{\"sample\":[\"sample\",\"users\"]}",
+ "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
+ "user_list": "[\"hive\",\"oozie\",\"nobody\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]",
+ "custom_mysql_jdbc_name" : "mysql-connector-java.jar",
+ "custom_oracle_jdbc_name" : "oracle-jdbc-driver.jar",
+ "custom_postgres_jdbc_name" : "test-postgres-jdbc.jar",
+ "custom_sqlanywhere_jdbc_name" : "sqla-client-jdbc.tar.gz"
+ },
+ "commandType": "EXECUTION_COMMAND",
+ "roleParams": {},
+ "serviceName": "HIVE",
+ "role": "HIVE_SERVER",
+ "commandParams": {
+ "command_timeout": "300",
+ "service_package_folder": "OOZIE",
+ "script_type": "PYTHON",
+ "script": "scripts/service_check.py",
+ "excluded_hosts": "host1,host2",
+ "mark_draining_only" : "false",
+ "update_files_only" : "false",
+ "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
+ "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
+ "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
+ "output_file":"HDFS_CLIENT-configs.tar.gz",
+ "refresh_topology": "True"
+ },
+ "taskId": 152,
+ "public_hostname": "c6401.ambari.apache.org",
+ "configurations": {
+ "sqoop-site": {
+ "atlas.cluster.name": "c1",
+ "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
+ },
+ "mapred-site": {
+ "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020",
+ "mapreduce.cluster.administrators": " hadoop",
+ "mapreduce.reduce.input.buffer.percent": "0.0",
+ "mapreduce.output.fileoutputformat.compress": "false",
+ "mapreduce.framework.name": "yarn",
+ "mapreduce.map.speculative": "false",
+ "mapreduce.reduce.shuffle.merge.percent": "0.66",
+ "yarn.app.mapreduce.am.resource.mb": "683",
+ "mapreduce.map.java.opts": "-Xmx273m",
+ "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+ "mapreduce.job.reduce.slowstart.completedmaps": "0.05",
+ "mapreduce.output.fileoutputformat.compress.type": "BLOCK",
+ "mapreduce.reduce.speculative": "false",
+ "mapreduce.reduce.java.opts": "-Xmx546m",
+ "mapreduce.am.max-attempts": "2",
+ "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.reduce.log.level": "INFO",
+ "mapreduce.map.sort.spill.percent": "0.7",
+ "mapreduce.task.timeout": "300000",
+ "mapreduce.map.memory.mb": "341",
+ "mapreduce.task.io.sort.factor": "100",
+ "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+ "mapreduce.reduce.memory.mb": "683",
+ "yarn.app.mapreduce.am.log.level": "INFO",
+ "mapreduce.map.log.level": "INFO",
+ "mapreduce.shuffle.port": "13562",
+ "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+ "mapreduce.map.output.compress": "false",
+ "yarn.app.mapreduce.am.staging-dir": "/user",
+ "mapreduce.reduce.shuffle.parallelcopies": "30",
+ "mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
+ "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888",
+ "mapreduce.jobhistory.done-dir": "/mr-history/done",
+ "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.task.io.sort.mb": "136",
+ "yarn.app.mapreduce.am.command-opts": "-Xmx546m",
+ "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+ },
+ "oozie-site": {
+ "oozie.service.PurgeService.purge.interval": "3600",
+ "oozie.service.CallableQueueService.queue.size": "1000",
+ "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd",
+ "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+ "oozie.service.HadoopAccessorService.nameNode.whitelist": " ",
+ "use.system.libpath.for.mapreduce.and.pig.jobs": "false",
+ "oozie.db.schema.name": "oozie",
+ "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
+ "oozie.service.JPAService.create.db.schema": "false",
+ "oozie.authentication.kerberos.name.rules": "\n RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n DEFAULT",
+ "oozie.service.ActionService.executor.ext.classes": "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor",
+ "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie",
+ "oozie.service.JPAService.jdbc.password": "asd",
+ "oozie.service.coord.normal.default.timeout": "120",
+ "oozie.service.AuthorizationService.security.enabled": "true",
+ "oozie.service.JPAService.pool.max.active.conn": "10",
+ "oozie.service.PurgeService.older.than": "30",
+ "oozie.service.coord.push.check.requeue.interval": "30000",
+ "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf",
+ "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ",
+ "oozie.service.CallableQueueService.callable.concurrency": "3",
+ "oozie.service.JPAService.jdbc.username": "oozie",
+ "oozie.service.CallableQueueService.threads": "10",
+ "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
+ "oozie.systemmode": "NORMAL",
+ "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib",
+ "oozie.services": "\n org.apache.oozie.service.SchedulerService,\n org.apache.oozie.service.InstrumentationService,\n org.apache.oozie.service.CallableQueueService,\n org.apache.oozie.service.UUIDService,\n org.apache.oozie.service.ELService,\n org.apache.oozie.service.AuthorizationService,\n org.apache.oozie.service.UserGroupInformationService,\n org.apache.oozie.service.HadoopAccessorService,\n org.apache.oozie.service.URIHandlerService,\n org.apache.oozie.service.MemoryLocksService,\n org.apache.oozie.service.DagXLogInfoService,\n org.apache.oozie.service.SchemaService,\n org.apache.oozie.service.LiteWorkflowAppService,\n org.apache.oozie.service.JPAService,\n org.apache.oozie.service.StoreService,\n org.apache.oozie.service.CoordinatorStoreService,\n org.apache.oozie.service.SLAStoreService,\n org.apache.oozie.service.DBLiteWorkflowStoreService,\n
org.apache.oozie.service.CallbackService,\n org.apache.oozie.service.ActionService,\n org.apache.oozie.service.ActionCheckerService,\n org.apache.oozie.service.RecoveryService,\n org.apache.oozie.service.PurgeService,\n org.apache.oozie.service.CoordinatorEngineService,\n org.apache.oozie.service.BundleEngineService,\n org.apache.oozie.service.DagEngineService,\n org.apache.oozie.service.CoordMaterializeTriggerService,\n org.apache.oozie.service.StatusTransitService,\n org.apache.oozie.service.PauseTransitService,\n org.apache.oozie.service.GroupsService,\n org.apache.oozie.service.ProxyUserService",
+ "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
+ "oozie.authentication.type": "simple",
+ "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver",
+ "oozie.system.id": "oozie-${user.name}"
+ },
+ "storm-site": {
+ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+ "topology.workers": "1",
+ "drpc.worker.threads": "64",
+ "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']",
+ "supervisor.heartbeat.frequency.secs": "5",
+ "topology.executor.send.buffer.size": "1024",
+ "drpc.childopts": "-Xmx768m",
+ "nimbus.thrift.port": "6627",
+ "storm.zookeeper.retry.intervalceiling.millis": "30000",
+ "storm.local.dir": "/hadoop/storm",
+ "topology.receiver.buffer.size": "8",
+ "storm.messaging.netty.client_worker_threads": "1",
+ "transactional.zookeeper.root": "/transactional",
+ "drpc.request.timeout.secs": "600",
+ "topology.skip.missing.kryo.registrations": "false",
+ "worker.heartbeat.frequency.secs": "1",
+ "zmq.hwm": "0",
+ "storm.zookeeper.connection.timeout": "15000",
+ "topology.max.error.report.per.interval": "5",
+ "storm.messaging.netty.server_worker_threads": "1",
+ "supervisor.worker.start.timeout.secs": "120",
+ "zmq.threads": "1",
+ "topology.acker.executors": "null",
+ "storm.local.mode.zmq": "false",
+ "topology.max.task.parallelism": "null",
+ "storm.zookeeper.port": "2181",
+ "nimbus.childopts": "-Xmx1024m",
+ "worker.childopts": "-Xmx768m",
+ "drpc.queue.size": "128",
+ "storm.zookeeper.retry.times": "5",
+ "nimbus.monitor.freq.secs": "10",
+ "storm.cluster.mode": "distributed",
+ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+ "drpc.invocations.port": "3773",
+ "storm.zookeeper.root": "/storm",
+ "logviewer.childopts": "-Xmx128m",
+ "transactional.zookeeper.port": "null",
+ "topology.worker.childopts": "null",
+ "topology.max.spout.pending": "null",
+ "nimbus.cleanup.inbox.freq.secs": "600",
+ "storm.messaging.netty.min_wait_ms": "100",
+ "nimbus.task.timeout.secs": "30",
+ "nimbus.thrift.max_buffer_size": "1048576",
+ "topology.sleep.spout.wait.strategy.time.ms": "1",
+ "topology.optimize": "true",
+ "nimbus.reassign": "true",
+ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+ "logviewer.appender.name": "A1",
+ "nimbus.host": "c6401.ambari.apache.org",
+ "ui.port": "8744",
+ "supervisor.slots.ports": "[6700, 6701]",
+ "nimbus.file.copy.expiration.secs": "600",
+ "supervisor.monitor.frequency.secs": "3",
+ "ui.childopts": "-Xmx768m",
+ "transactional.zookeeper.servers": "null",
+ "zmq.linger.millis": "5000",
+ "topology.error.throttle.interval.secs": "10",
+ "topology.worker.shared.thread.pool.size": "4",
+ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+ "task.heartbeat.frequency.secs": "3",
+ "topology.transfer.buffer.size": "1024",
+ "storm.zookeeper.session.timeout": "20000",
+ "topology.executor.receive.buffer.size": "1024",
+ "topology.stats.sample.rate": "0.05",
+ "topology.fall.back.on.java.serialization": "true",
+ "supervisor.childopts": "-Xmx256m",
+ "topology.enable.message.timeouts": "true",
+ "storm.messaging.netty.max_wait_ms": "1000",
+ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+ "nimbus.supervisor.timeout.secs": "60",
+ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+ "nimbus.inbox.jar.expiration.secs": "3600",
+ "drpc.port": "3772",
+ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+ "storm.zookeeper.retry.interval": "1000",
+ "storm.messaging.netty.max_retries": "30",
+ "topology.tick.tuple.freq.secs": "null",
+ "supervisor.enable": "true",
+ "nimbus.task.launch.secs": "120",
+ "task.refresh.poll.secs": "10",
+ "topology.message.timeout.secs": "30",
+ "storm.messaging.netty.buffer_size": "5242880",
+ "topology.state.synchronization.timeout.secs": "60",
+ "supervisor.worker.timeout.secs": "30",
+ "topology.trident.batch.emit.interval.millis": "500",
+ "topology.builtin.metrics.bucket.size.secs": "60",
+ "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin",
+ "logviewer.port": "8000",
+ "topology.debug": "false"
+ },
+ "webhcat-site": {
+ "templeton.pig.path": "pig.tar.gz/pig/bin/pig",
+ "templeton.exec.timeout": "60000",
+ "templeton.override.enabled": "false",
+ "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+ "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181",
+ "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse",
+ "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+ "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz",
+ "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar",
+ "templeton.port": "50111",
+ "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar",
+ "templeton.hadoop": "/usr/bin/hadoop",
+ "templeton.hive.path": "hive.tar.gz/hive/bin/hive",
+ "templeton.hadoop.conf.dir": "/etc/hadoop/conf",
+ "templeton.hcat": "/usr/bin/hcat",
+ "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz",
+ "templeton.sqoop.archive": "hdfs:///apps/webhcat/sqoop.tar.gz"
+ },
+ "capacity-scheduler": {
+ "yarn.scheduler.capacity.node-locality-delay": "40",
+ "yarn.scheduler.capacity.root.capacity": "100",
+ "yarn.scheduler.capacity.root.acl_administer_queue": "*",
+ "yarn.scheduler.capacity.root.queues": "default",
+ "yarn.scheduler.capacity.maximum-applications": "10000",
+ "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+ "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+ "yarn.scheduler.capacity.root.default.state": "RUNNING",
+ "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+ "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+ "yarn.scheduler.capacity.root.default.capacity": "100",
+ "yarn.scheduler.capacity.root.default.acl_submit_applications": "*"
+ },
+ "hdfs-site": {
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.block.access.token.enable": "true",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+ "dfs.cluster.administrators": "test_user1,test_user2 hdfs,test_group",
+ "dfs.replication": "3",
+ "ambari.dfs.datanode.http.port": "50075",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1.0f",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.permissions.enabled": "true",
+ "fs.checkpoint.size": "67108864",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+ "dfs.blocksize": "134217728",
+ "dfs.datanode.max.transfer.threads": "1024",
+ "dfs.datanode.du.reserved": "1073741824",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.namenode.handler.count": "100",
+ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary,/hadoop/hdfs/namesecondary2",
+ "fs.permissions.umask-mode": "022",
+ "dfs.datanode.http.address": "0.0.0.0:50075",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.namenode.accesstime.precision": "0",
+ "ambari.dfs.datanode.port": "50010",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.heartbeat.interval": "3",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.https.port": "50470",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.replication.max": "50",
+ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+ },
+ "hbase-site": {
+ "hbase.hstore.flush.retries.number": "120",
+ "hbase.client.keyvalue.maxsize": "10485760",
+ "hbase.hstore.compactionThreshold": "3",
+ "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data",
+ "hbase.regionserver.handler.count": "60",
+ "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+ "hbase.hregion.memstore.block.multiplier": "2",
+ "hbase.hregion.memstore.flush.size": "134217728",
+ "hbase.superuser": "hbase",
+ "hbase.zookeeper.property.clientPort": "2181",
+ "hbase.regionserver.global.memstore.upperLimit": "0.4",
+ "zookeeper.session.timeout": "30000",
+ "hbase.tmp.dir": "/hadoop/hbase",
+ "hbase.local.dir": "${hbase.tmp.dir}/local",
+ "hbase.hregion.max.filesize": "10737418240",
+ "hfile.block.cache.size": "0.40",
+ "hbase.security.authentication": "simple",
+ "hbase.defaults.for.version.skip": "true",
+ "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org",
+ "zookeeper.znode.parent": "/hbase-unsecure",
+ "hbase.hstore.blockingStoreFiles": "10",
+ "hbase.master.port": "60000",
+ "hbase.hregion.majorcompaction": "86400000",
+ "hbase.security.authorization": "false",
+ "hbase.cluster.distributed": "true",
+ "hbase.hregion.memstore.mslab.enabled": "true",
+ "hbase.client.scanner.caching": "100",
+ "hbase.zookeeper.useMulti": "true"
+ },
+ "core-site": {
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "gluster.daemon.user": "null",
+ "hadoop.proxyuser.oozie.groups": "users",
+ "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org",
+ "hadoop.proxyuser.hive.groups": "users",
+ "hadoop.security.authentication": "simple",
+ "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "fs.AbstractFileSystem.glusterfs.impl": "null",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "fs.trash.interval": "360",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "hadoop.security.authorization": "false",
+ "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org",
+ "hadoop.security.auth_to_local": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT",
+ "hadoop.proxyuser.hcat.groups": "users",
+ "ipc.client.connection.maxidletime": "30000",
+ "ipc.client.connect.max.retries": "50"
+ },
+ "hive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "false",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.server2.authentication": "NOSASL",
+ "hive.server2.transport.mode": "binary",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hive-interactive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "false",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9084",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.server2.authentication": "NOSASL",
+ "hive.server2.transport.mode": "binary",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hiveserver2-site": {
+ "hive.metastore.metrics.enabled": "true",
+ "hive.service.metrics.reporter": "HADOOP2"
+ },
+ "ranger-hive-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "ranger-hive-plugin-enabled": "No",
+ "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
+ "REPOSITORY_CONFIG_USERNAME": "hive",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hive",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-knox-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "KNOX_HOME": "/usr/hdp/current/knox-server",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "ranger-knox-plugin-enabled": "No",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "admin",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "admin-password",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "yarn-site": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+ "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/hadoop/yarn/local1",
+ "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025",
+ "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+ "yarn.resourcemanager.hostname": "c6402.ambari.apache.org",
+ "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+ "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+ "yarn.nodemanager.resource.memory-mb": "2048",
+ "yarn.scheduler.minimum-allocation-mb": "683",
+ "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050",
+ "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030",
+ "yarn.log-aggregation.retain-seconds": "2592000",
+ "yarn.scheduler.maximum-allocation-mb": "2048",
+ "yarn.log-aggregation-enable": "true",
+ "yarn.nodemanager.address": "0.0.0.0:45454",
+ "yarn.nodemanager.container-monitor.interval-ms": "3000",
+ "yarn.nodemanager.log-aggregation.compression-type": "gz",
+ "yarn.nodemanager.log.retain-seconds": "604800",
+ "yarn.nodemanager.delete.debug-delay-sec": "0",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/hadoop/yarn/log1",
+ "yarn.nodemanager.health-checker.interval-ms": "135000",
+ "yarn.resourcemanager.am.max-attempts": "2",
+ "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+ "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+ "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+ "yarn.nodemanager.vmem-check-enabled": "false",
+ "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+ "yarn.admin.acl": "*",
+ "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088",
+ "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
+ "yarn.resourcemanager.nodes.include-path": "/etc/hadoop/conf_for_include/yarn.include",
+ "yarn.nodemanager.linux-container-executor.group": "hadoop",
+ "yarn.acl.enable": "true",
+ "manage.include.files": "false",
+ "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs",
+ "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+ "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141",
+ "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
+ "yarn.http.policy": "HTTP_ONLY",
+ "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090"
+ },
+ "ranger-yarn-plugin-properties": {
+ "ranger-yarn-plugin-enabled": "No"
+ },
+ "tez-site": {
+ "tez.am.log.level": "WARN",
+ "tez.lib.uris": "hdfs:///apps/tez/,hdfs:///apps/tez/lib/",
+ "tez.staging-dir": "/tmp/${user.name}/staging",
+ "tez.am.am-rm.heartbeat.interval-ms.max": "250"
+ },
+ "slider-env": {
+ "content": "export JAVA_HOME={{java64_home}}\nexport HADOOP_CONF_DIR={{hadoop_conf_dir}}"
+ },
+ "yarn-env": {
+ "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+ "apptimelineserver_heapsize": "1024",
+ "nodemanager_heapsize": "1024",
+ "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n #echo \"run java in $JAVA_HOME\"\n JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n echo \"Error: JAVA_HOME is not set.\"\n exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n JAVA_HEAP_MAX=\"-
Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to specif
y an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be appen
ded to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${YARN_
ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
+ "yarn_heapsize": "1024",
+ "yarn_user": "yarn",
+ "resourcemanager_heapsize": "1024",
+ "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+ "min_user_id": "1000",
+ "is_supported_yarn_ranger": "false",
+ "service_check.queue.name": "default"
+ },
+ "hadoop-env": {
+ "hdfs_tmp_dir": "/tmp",
+ "namenode_opt_maxnewsize": "200m",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "200m",
+ "namenode_opt_permsize" : "128m",
+ "namenode_opt_maxpermsize" : "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nex
port HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USE
R/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/
gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SE
CURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The s
cheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "hdfs_user": "hdfs",
+ "dtnode_heapsize": "1024m",
+ "proxyuser_group": "users",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop"
+ },
+ "hive-env": {
+ "hcat_pid_dir": "/var/run/webhcat",
+ "hcat_user": "hcat",
+ "hive_ambari_database": "MySQL",
+ "hive_hostname": "abtest-3.c.pramod-thangali.internal",
+ "hive_metastore_port": "9083",
+ "webhcat_user": "hcat",
+ "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can
be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}",
+ "hive_database_name": "hive",
+ "hive_database_type": "mysql",
+ "hive_pid_dir": "/var/run/hive",
+ "hive_log_dir": "/var/log/hive",
+ "hive_user": "hive",
+ "hcat_log_dir": "/var/log/webhcat",
+ "hive_database": "New MySQL Database",
+ "hive_security_authorization": "None"
+ },
+ "ranger-env": {
+ "xml_configurations_supported" : "false"
+ },
+ "cluster-env": {
+ "managed_hdfs_resource_property_names": "",
+ "security_enabled": "false",
+ "hdfs_user_principal" : "",
+ "hdfs_user_keytab" : "",
+ "ignore_groupsusers_create": "false",
+ "smokeuser": "ambari-qa",
+ "kerberos_domain": "EXAMPLE.COM",
+ "user_group": "hadoop",
+ "repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0",
+ "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}",
+ "metrics_collector_external_hosts": "c6402.ambari.apache.org",
+ "metrics_collector_external_port": "6189",
+ "override_uid" : "true",
+ "fetch_nonlocal_groups": "true",
+ "manage_dirs_on_root": "true",
+ "ignore_bad_mounts": "false"
+ },
+ "hbase-env": {
+ "hbase_pid_dir": "/var/run/hbase",
+ "hbase_user": "hbase",
+ "hbase_master_heapsize": "1024m",
+ "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateS
tamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra
ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBAS
E_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}",
+ "hbase_regionserver_heapsize": "1024m",
+ "hbase_regionserver_xmn_max": "512",
+ "hbase_regionserver_xmn_ratio": "0.2",
+ "hbase_log_dir": "/var/log/hbase",
+ "hbase_java_io_tmpdir" : "/tmp",
+ "hbase_regionserver_shutdown_timeout": "30"
+ },
+ "ganglia-env": {
+ "gmond_user": "nobody",
+ "ganglia_runtime_dir": "/var/run/ganglia/hdp",
+ "rrdcached_base_dir": "/var/lib/ganglia/rrds",
+ "rrdcached_flush_timeout": "7200",
+ "gmetad_user": "nobody",
+ "rrdcached_write_threads": "4",
+ "rrdcached_delay": "1800",
+ "rrdcached_timeout": "3600"
+ },
+ "zookeeper-env": {
+ "zk_user": "zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "zk_pid_dir": "/var/run/zookeeper",
+ "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM",
+ "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab"
+ },
+ "zoo.cfg": {
+ "clientPort": "2181",
+ "syncLimit": "5",
+ "initLimit": "10",
+ "dataDir": "/hadoop/zookeeper",
+ "tickTime": "2000"
+ },
+ "mapred-env": {
+ "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.",
+ "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
+ "mapred_user": "mapred",
+ "jobhistory_heapsize": "900",
+ "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+ },
+ "tez-env": {
+ "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}",
+ "tez_user": "tez"
+ },
+ "flume-env": {
+ "content": "export JAVA_HOME={{java64_home}}\nexport HIVE_HOME={{flume_hive_home}}",
+ "flume_user": "flume",
+ "flume_log_dir": "/var/log/flume",
+ "flume_run_dir": "/var/run/flume"
+ },
+ "storm-env": {
+ "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\n# export STORM_CONF_DIR=\"\"",
+ "storm_log_dir": "/var/log/storm",
+ "storm_pid_dir": "/var/run/storm",
+ "storm_user": "storm"
+ },
+ "falcon-env": {
+ "falcon_apps_hdfs_dir": "/apps/falcon",
+ "falcon_port": "15000",
+ "falcon_pid_dir": "/var/run/falcon",
+ "falcon_log_dir": "/var/log/falcon",
+ "falcon.emeddedmq.port": "61616",
+ "falcon_user": "falcon",
+ "falcon_local_dir": "/hadoop/falcon",
+ "content": "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\n#export FALCON_OPTS=\n\n# any additional java opts that you want to set for client only\n#export FALCON_CLIENT_OPTS=\n\n# java heap size we want to set for the client. Default is 1024MB\n#export FALCON_CLIENT_HEAP=\n\n# any additional opts you want to set for prisim service.\n#export FALCON_PRISM_OPTS=\n\n# java heap size we want to set for the prisim service. Default is 1024MB\n#export FALCON_PRISM_HEAP=\n\n# any additional opts you want to set for falcon service.\nexport FALCON_SERVER_OPTS=\"-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}\"\n\n# java heap size we want to set for the falcon server. Default is 1024MB\n#export FALCON_SERVER_HEAP=\n\n# What is is considered as falcon home
dir. Default is the base locaion of the installed software\n#export FALCON_HOME_DIR=\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport FALCON_LOG_DIR={{falcon_log_dir}}\n\n# Where pid files are stored. Defatult is logs directory under the base install location\nexport FALCON_PID_DIR={{falcon_pid_dir}}\n\n# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location\nexport FALCON_DATA_DIR={{falcon_embeddedmq_data}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\n#export FALCON_EXPANDED_WEBAPP_DIR=",
+ "falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data",
+ "falcon.embeddedmq": "true",
+ "falcon_store_uri": "file:///hadoop/falcon/store",
+ "supports_hive_dr": "true"
+ },
+ "oozie-env": {
+ "oozie_derby_database": "Derby",
+ "oozie_admin_port": "11001",
+ "oozie_hostname": "abtest-3.c.pramod-thangali.internal",
+ "oozie_pid_dir": "/var/run/oozie",
+ "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuratio
n directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "oozie_user": "oozie",
+ "oozie_admin_users": "{oozie_user}, {oozie_user}-admin",
+ "oozie_database": "New Derby Database",
+ "oozie_data_dir": "/hadoop/oozie/data",
+ "oozie_log_dir": "/var/log/oozie",
+ "oozie_tmp_dir": "/var/tmp/oozie"
+ },
+ "webhcat-env": {
+ "content": "\n# The file containing the running pid\nPID_FILE={{pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=/usr/lib/hadoop"
+ },
+ "hcat-env": {
+ "content": "JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}"
+ },
+ "pig-env": {
+ "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+ },
+ "sqoop-env": {
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
+ "sqoop_user": "sqoop"
+ },
+ "hdfs-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "yarn-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hbase-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-exec-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "webhcat-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "zookeeper-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-properties": {
+ "content": "pigproperties\nline2"
+ },
+ "oozie-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "flume-conf": {
+ "content": "a1.sources = r1\n a1.sinks = k1\n a1.channels = c1\n # Describe/configure the source\n a1.sources.r1.type = netcat\n a1.sources.r1.bind = localhost\n a1.sources.r1.port = 44444\n \n # Describe the sink\n a1.sinks.k1.type = logger\n \n # Use a channel which buffers events in memory\n a1.channels.c1.type = memory\n a1.channels.c1.capacity = 1000\n a1.channels.c1.transactionCapacity = 100\n \n # Bind the source and sink to the channel\n a1.sources.r1.channels = c1\n a1.sinks.k1.channel = c1\n"
+ },
+ "flume-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "ranger-hbase-plugin-properties": {
+ "POLICY_MGR_URL": "{{policymgr_mgr_url}}",
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "ranger-hbase-plugin-enabled": "No",
+ "REPOSITORY_NAME": "{{repo_name}}",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}",
+ "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.DB.PASSWORD": "{{xa_audit_db_password}}",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hbase",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.DB.FLAVOUR": "{{xa_audit_db_flavor}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hbase",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-hdfs-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "hadoop.rpc.protection": "-",
+ "ranger-hdfs-plugin-enabled": "No",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hadoop",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ams-hbase-env": {
+ "hbase_pid_dir": "/var/run/ambari-metrics-collector/",
+ "regionserver_xmn_size": "256m",
+ "max_open_files_limit": "32768",
+ "hbase_master_maxperm_size": "128m",
+ "hbase_regionserver_xmn_ratio": "0.2",
+ "hbase_master_heapsize": "512m",
+ "hbase_regionserver_heapsize": "512m",
+ "hbase_log_dir": "/var/log/ambari-metrics-collector",
+ "hbase_master_xmn_size": "256m",
+ "content": "\n",
+ "hbase_regionserver_shutdown_timeout": "30"
+ },
+ "ams-log4j": {
+ "content": "\n"
+ },
+ "ams-hbase-site": {
+ "hbase.master.info.bindAddress": "0.0.0.0",
+ "hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper",
+ "hbase.master.wait.on.regionservers.mintostart": "1",
+ "hbase.replication": "false",
+ "hbase.regionserver.global.memstore.lowerLimit": "0.3",
+ "hbase.hregion.memstore.block.multiplier": "4",
+ "hbase.hregion.memstore.flush.size": "134217728",
+ "hbase.rootdir": "hdfs://localhost:8020/apps/hbase/data",
+ "hbase.zookeeper.property.clientPort": "61181",
+ "phoenix.spool.directory": "${hbase.tmp.dir}/phoenix-spool",
+ "hbase.client.scanner.timeout.period": "900000",
+ "phoenix.groupby.maxCacheSize": "307200000",
+ "hbase.snapshot.enabled": "false",
+ "hbase.regionserver.global.memstore.upperLimit": "0.35",
+ "hbase_master_xmn_size": "128m",
+ "phoenix.query.spoolThresholdBytes": "12582912",
+ "zookeeper.session.timeout": "120000",
+ "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp",
+ "hfile.block.cache.size": "0.3",
+ "hbase.regionserver.port": "61320",
+ "hbase.regionserver.thread.compaction.small": "3",
+ "hbase.master.info.port": "61310",
+ "hbase.hregion.majorcompaction": "0",
+ "phoenix.query.maxGlobalMemoryPercentage": "15",
+ "hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}",
+ "hbase.regionserver.info.port": "61330",
+ "hbase.hstore.blockingStoreFiles": "200",
+ "hbase.master.port": "61300",
+ "hbase.zookeeper.leaderport": "61388",
+ "hbase.regionserver.thread.compaction.large": "2",
+ "phoenix.query.timeoutMs": "1200000",
+ "hbase.local.dir": "${hbase.tmp.dir}/local",
+ "hbase.cluster.distributed": "true",
+ "zookeeper.session.timeout.localHBaseCluster": "20000",
+ "hbase.client.scanner.caching": "10000",
+ "phoenix.sequence.saltBuckets": "2",
+ "hbase.hstore.flusher.count": "2",
+ "hbase.zookeeper.peerport": "61288"
+ },
+ "ams-env": {
+ "ambari_metrics_user": "ams",
+ "metrics_monitor_log_dir": "/var/log/ambari-metrics-monitor",
+ "metrics_collector_log_dir": "/var/log/ambari-metrics-collector",
+ "metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor",
+ "content": "\n",
+ "metrics_collector_pid_dir": "/var/run/ambari-metrics-collector",
+ "metrics_collector_heapsize": "512m"
+ },
+ "ams-hbase-policy": {
+ "security.masterregion.protocol.acl": "*",
+ "security.admin.protocol.acl": "*",
+ "security.client.protocol.acl": "*"
+ },
+ "ams-hbase-log4j": {
+ "content": "\n"
+ },
+ "ams-site": {
+ "timeline.metrics.service.http.policy": "HTTPS_ONLY",
+ "timeline.metrics.host.aggregator.minute.ttl": "604800",
+ "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "1",
+ "timeline.metrics.cluster.aggregator.daily.ttl": "63072000",
+ "timeline.metrics.cluster.aggregator.minute.timeslice.interval": "30",
+ "timeline.metrics.service.resultset.fetchSize": "2000",
+ "timeline.metrics.service.checkpointDelay": "60",
+ "timeline.metrics.host.aggregator.hourly.disabled": "false",
+ "timeline.metrics.cluster.aggregator.daily.interval": "86400",
+ "timeline.metrics.cluster.aggregator.hourly.ttl": "31536000",
+ "timeline.metrics.host.aggregator.daily.disabled": "false",
+ "timeline.metrics.hbase.compression.scheme": "SNAPPY",
+ "timeline.metrics.cluster.aggregator.hourly.interval": "3600",
+ "phoenix.spool.directory": "/tmp",
+ "timeline.metrics.host.aggregator.ttl": "86400",
+ "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.service.cluster.aggregator.appIds": "datanode,nodemanager,hbase",
+ "timeline.metrics.service.webapp.address": "0.0.0.0:6188",
+ "timeline.metrics.cluster.aggregator.hourly.disabled": "false",
+ "timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint",
+ "timeline.metrics.hbase.data.block.encoding": "FAST_DIFF",
+ "timeline.metrics.cluster.aggregator.minute.ttl": "2592000",
+ "timeline.metrics.host.aggregator.minute.disabled": "false",
+ "phoenix.query.maxGlobalMemoryPercentage": "25",
+ "timeline.metrics.service.operation.mode": "distributed",
+ "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.cluster.aggregator.daily.disabled": "false",
+ "timeline.metrics.service.rpc.address": "0.0.0.0:60200",
+ "timeline.metrics.cluster.aggregator.minute.disabled": "false",
+ "timeline.metrics.host.aggregator.hourly.ttl": "2592000",
+ "timeline.metrics.host.aggregator.minute.interval": "120",
+ "timeline.metrics.service.default.result.limit": "5760",
+ "timeline.metrics.host.aggregator.daily.ttl": "31536000",
+ "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1",
+ "timeline.metrics.daily.aggregator.minute.interval": "86400",
+ "timeline.metrics.cluster.aggregator.minute.interval": "120",
+ "timeline.metrics.host.aggregator.hourly.interval": "3600"
+ },
+ "ams-grafana-env": {
+ "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana",
+ "metrics_grafana_pid_dir": "/var/run/ambari-metrics-grafana",
+ "metrics_grafana_data_dir": "/var/lib/ambari-metrics-grafana",
+ "content": "\n",
+ "metrics_grafana_username" : "admin",
+ "metrics_grafana_password" : "admin"
+ },
+ "ams-ssl-server": {
+ "content": "\n"
+ },
+ "ams-ssl-client": {
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.truststore.password": "bigdata"
+ },
+ "ams-grafana-ini": {
+ "content": "\n"
+ },
+ "hadoop-metrics2.properties": {
+ "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\r\n# contributor license agreements. See the NOTICE file distributed with\r\n# this work for additional information regarding copyright ownership.\r\n# The ASF licenses this file to You under the Apache License, Version 2.0\r\n# (the \"License\"); you may not use this file except in compliance with\r\n# the License. You may obtain a copy of the License at\r\n#\r\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# syntax: [prefix].[source|sink|jmx].[instance].[options]\r\n# See package.html for org.apache.hadoop.metrics2 for details\r\n\r\n{% if has_ganglia_se
rver %}\r\n*.period=60\r\n\r\n*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31\r\n*.sink.ganglia.period=10\r\n\r\n# default for supportsparse is false\r\n*.sink.ganglia.supportsparse=true\r\n\r\n.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both\r\n.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40\r\n\r\n# Hook up to the server\r\nnamenode.sink.ganglia.servers={{ganglia_server_host}}:8661\r\ndatanode.sink.ganglia.servers={{ganglia_server_host}}:8659\r\njobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662\r\ntasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658\r\nmaptask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nreducetask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nresourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664\r\nnodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657\r\nhistoryserver.sink.ganglia.servers={{ganglia_server_host}}:8666\r\njournaln
ode.sink.ganglia.servers={{ganglia_server_host}}:8654\r\nnimbus.sink.ganglia.servers={{ganglia_server_host}}:8649\r\nsupervisor.sink.ganglia.servers={{ganglia_server_host}}:8650\r\n\r\nresourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue\r\n\r\n{% endif %}\r\n\r\n{% if has_metric_collector %}\r\n\r\n*.period={{metrics_collection_period}}\r\n*.sink.timeline.plugin.urls=file:\/\/\/usr\/lib\/ambari-metrics-hadoop-sink\/ambari-metrics-hadoop-sink.jar\r\n*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink\r\n*.sink.timeline.period={{metrics_collection_period}}\r\n*.sink.timeline.sendInterval={{metrics_report_interval}}000\r\n*.sink.timeline.slave.host.name={{hostname}}\r\n*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}\r\n*.sink.timeline.protocol={{metric_collector_protocol}}\r\n*.sink.timeline.port={{metric_collector_port}}\r\n\r\n# HTTPS properties\r\n*.sink.timeline.truststore.path = {{metric_truststore_path}}\r\n*.sink.timeline.truststore.t
ype = {{metric_truststore_type}}\r\n*.sink.timeline.truststore.password = {{metric_truststore_password}}\r\n\r\ndatanode.sink.timeline.collector={{metric_collector_hosts}}\r\nnamenode.sink.timeline.collector={{metric_collector_hosts}}\r\nresourcemanager.sink.timeline.collector={{metric_collector_hosts}}\r\nnodemanager.sink.timeline.collector={{metric_collector_hosts}}\r\njobhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\njournalnode.sink.timeline.collector={{metric_collector_hosts}}\r\nmaptask.sink.timeline.collector={{metric_collector_hosts}}\r\nreducetask.sink.timeline.collector={{metric_collector_hosts}}\r\napplicationhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\n\r\nresourcemanager.sink.timeline.tagsForPrefix.yarn=Queue\r\n\r\n{% if is_nn_client_port_configured %}\r\n# Namenode rpc ports customization\r\nnamenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}\r\n{% endif %}\r\n{% if is_nn_dn_port_configured %}\r\nnamenode.si
nk.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}\r\n{% endif %}\r\n{% if is_nn_healthcheck_port_configured %}\r\nnamenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}\r\n{% endif %}\r\n\r\n{% endif %}"
+ }
+ },
+ "configuration_attributes": {
+ "ams-hbase-env": {},
+ "ams-hbase-security-site": {},
+ "ams-log4j": {},
+ "ams-hbase-site": {},
+ "ams-hbase-policy": {},
+ "ams-hbase-log4j": {},
+ "ams-site": {},
+ "ams-ssl-server": {},
+ "ams-ssl-client": {},
+ "sqoop-site": {},
+ "yarn-site": {
+ "final": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
+ "yarn.nodemanager.container-executor.class": "true",
+ "yarn.nodemanager.local-dirs": "true"
+ }
+ },
+ "tez-site": {
+ "final": {
+ "tez.am.log.level": "true"
+ }
+ },
+ "capacity-scheduler": {
+ "final": {
+ "yarn.scheduler.capacity.node-locality-delay": "true"
+ }
+ },
+ "mapred-site": {
+ "final": {
+ "mapred.healthChecker.script.path": "true",
+ "mapreduce.jobtracker.staging.root.dir": "true"
+ }
+ },
+ "oozie-site": {
+ "final": {
+ "oozie.service.PurgeService.purge.interval": "true",
+ "oozie.service.CallableQueueService.queue.size": "true"
+ }
+ },
+ "webhcat-site": {
+ "final": {
+ "templeton.pig.path": "true",
+ "templeton.exec.timeout": "true",
+ "templeton.override.enabled": "true"
+ }
+ },
+ "hdfs-site": {
+ "final": {
+ "dfs.web.ugi": "true",
+ "dfs.support.append": "true",
+ "dfs.cluster.administrators": "true"
+ }
+ },
+ "hbase-site": {
+ "final": {
+ "hbase.client.keyvalue.maxsize": "true",
+ "hbase.hstore.compactionThreshold": "true",
+ "hbase.rootdir": "true"
+ }
+ },
+ "core-site": {
+ "final": {
+ "hadoop.proxyuser.hive.groups": "true",
+ "webinterface.private.actions": "true",
+ "hadoop.proxyuser.oozie.hosts": "true"
+ }
+ },
+ "hive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hiveserver2-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hive-interactive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ }
+ },
+ "configurationTags": {
+ "ams-hbase-env": {
+ "tag": "version1"
+ },
+ "ams-hbase-security-site": {
+ "tag": "version1"
+ },
+ "ams-hbase-site": {
+ "tag": "version1"
+ },
+ "ams-env": {
+ "tag": "version1"
+ },
+ "ams-site": {
+ "tag": "version1"
+ },
+ "ams-ssl-server": {
+ "tag": "version1"
+ },
+ "ams-ssl-client": {
+ "tag": "version1"
+ },
+ "ams-hbase-policy": {
+ "tag": "version1"
+ },
+ "ams-log4j": {
+ "tag": "version1"
+ },
+ "ams-hbase-log4j": {
+ "tag": "version1"
+ },
+ "capacity-scheduler": {
+ "tag": "version1"
+ },
+ "oozie-site": {
+ "tag": "version1"
+ },
+ "storm-site": {
+ "tag": "version1"
+ },
+ "webhcat-site": {
+ "tag": "version1"
+ },
+ "global": {
+ "tag": "version1"
+ },
+ "mapred-site": {
+ "tag": "version1"
+ },
+ "hdfs-site": {
+ "tag": "version1"
+ },
+ "hbase-site": {
+ "tag": "version1"
+ },
+ "core-site": {
+ "tag": "version1"
+ },
+ "yarn-site": {
+ "tag": "version1"
+ },
+ "hive-site": {
+ "tag": "version1"
+ },
+ "hive-interactive-site": {
+ "tag": "version1"
+ },
+ "hiveserver2-site": {
+ "tag": "version1"
+ },
+ "hdfs-log4j": {
+ "tag": "version1"
+ },
+ "yarn-log4j": {
+ "tag": "version1"
+ },
+ "hbase-log4j": {
+ "tag": "version1"
+ },
+ "hive-log4j": {
+ "tag": "version1"
+ },
+ "hive-exec-log4j": {
+ "tag": "version1"
+ },
+ "zookeeper-log4j": {
+ "tag": "version1"
+ },
+ "oozie-log4j": {
+ "tag": "version1"
+ },
+ "pig-log4j": {
+ "tag": "version1"
+ },
+ "pig-properties": {
+ "tag": "version1"
+ }
+ },
+ "commandId": "7-1",
+ "clusterHostInfo": {
+ "ambari_server_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "snamenode_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nm_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "drpc_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "slave_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_server_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "hive_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "logviewer_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_metastore_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "hbase_rs_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "webhcat_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "zookeeper_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "supervisor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_monitor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_ping_ports": [
+ "8670",
+ "8670"
+ ],
+ "rm_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "all_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_racks": [
+ "/default/rack_01",
+ "/default/rack_02"
+ ],
+ "all_ipv4_ips": [
+ "127.0.0.1",
+ "127.0.0.1"
+ ],
+ "storm_ui_server_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "oozie_server": [
+ "c6402.ambari.apache.org"
+ ],
+ "hs_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nimbus_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "namenode_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "hbase_master_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_mysql_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "falcon_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "metrics_collector_hosts": [
+ "c6401.ambari.apache.org"
+ ]
+ }
+}
[02/50] [abbrv] ambari git commit: AMBARI-21696. Spark thrift service
was alerting for connectivity for all newly created clusters. (Mingjie Tang
via stoader)
Posted by ja...@apache.org.
AMBARI-21696. Spark thrift service was alerting for connectivity for all newly created clusters. (Mingjie Tang via stoader)
(cherry picked from commit 43c0e4946709d73bf3009dcfed68eb9820184382)
Change-Id: I99f64f803905584fcc5e14d066eca00027df0e43
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7f01766a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7f01766a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7f01766a
Branch: refs/heads/2.5-maint
Commit: 7f01766af7de4366b7c334211ad9d972c4b6d207
Parents: 816ebb5
Author: Mingjie Tang <mt...@hortonworks.com>
Authored: Thu Aug 10 11:36:21 2017 +0200
Committer: stoader@hortonworks.com <je...@hortonworks.com>
Committed: Thu Aug 10 19:56:35 2017 +0000
----------------------------------------------------------------------
.../SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py | 2 +-
.../2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/7f01766a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py
index 93a2de8..3f80fd9 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_thrift_port.py
@@ -44,7 +44,7 @@ HIVE_SERVER2_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.p
# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
-THRIFT_PORT_DEFAULT = 10015
+THRIFT_PORT_DEFAULT = 10001
HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
HIVEUSER_DEFAULT = 'hive'
http://git-wip-us.apache.org/repos/asf/ambari/blob/7f01766a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py
index 1ce4aca..9e1afea 100644
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_thrift_port.py
@@ -45,7 +45,7 @@ HIVE_SERVER2_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.p
# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
-THRIFT_PORT_DEFAULT = 10016
+THRIFT_PORT_DEFAULT = 10002
HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
HIVEUSER_DEFAULT = 'hive'
[50/50] [abbrv] ambari git commit: Revert "AMBARI-21205 Make
ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)"
Posted by ja...@apache.org.
Revert "AMBARI-21205 Make ToggleKerberos and AddDeleteService experimental features (Duc Le via rzang)"
This reverts commit d4d145a8b385c944293b8aa64d53bf55d4993853.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fe71b222
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fe71b222
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fe71b222
Branch: refs/heads/2.5-maint
Commit: fe71b2224b9360be7fdeb735c9e2e7a0115fefbb
Parents: 2453e16
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Mon Oct 9 11:33:29 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Mon Oct 9 11:33:29 2017 -0700
----------------------------------------------------------------------
ambari-web/app/config.js | 4 +--
ambari-web/app/routes/add_service_routes.js | 2 +-
ambari-web/app/routes/main.js | 2 +-
.../app/templates/main/admin/kerberos.hbs | 38 +++++++++-----------
.../main/service/all_services_actions.hbs | 14 ++++----
ambari-web/app/views/main/admin.js | 14 ++++----
.../main/admin/stack_upgrade/services_view.js | 2 +-
ambari-web/app/views/main/menu.js | 14 ++++----
ambari-web/app/views/main/service/item.js | 2 +-
.../admin/stack_upgrade/services_view_test.js | 1 -
10 files changed, 40 insertions(+), 53 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index f5959f0..451416d 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -89,9 +89,7 @@ App.supports = {
kerberosStackAdvisor: true,
logCountVizualization: false,
enabledWizardForHostOrderedUpgrade: true,
- manageJournalNode: true,
- enableToggleKerberos: true,
- enableAddDeleteServices: true
+ manageJournalNode: true
};
if (App.enableExperimental) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/routes/add_service_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/add_service_routes.js b/ambari-web/app/routes/add_service_routes.js
index ee19bb7..13a1345 100644
--- a/ambari-web/app/routes/add_service_routes.js
+++ b/ambari-web/app/routes/add_service_routes.js
@@ -24,7 +24,7 @@ module.exports = App.WizardRoute.extend({
route: '/service/add',
enter: function (router) {
- if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') && App.supports.enableAddDeleteServices) {
+ if (App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
// `getSecurityStatus` call is required to retrieve information related to kerberos type: Manual or automated kerberos
router.get('mainController').isLoading.call(router.get('clusterController'),'isClusterNameLoaded').done(function () {
App.router.get('mainAdminKerberosController').getSecurityStatus().always(function () {
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/routes/main.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/main.js b/ambari-web/app/routes/main.js
index 529c289..02b561d 100644
--- a/ambari-web/app/routes/main.js
+++ b/ambari-web/app/routes/main.js
@@ -403,7 +403,7 @@ module.exports = Em.Route.extend(App.RouterRedirections, {
adminKerberos: Em.Route.extend({
route: '/kerberos',
enter: function (router, transition) {
- if (router.get('loggedIn') && (!App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || !App.supports.enableToggleKerberos)) {
+ if (router.get('loggedIn') && !App.isAuthorized('CLUSTER.TOGGLE_KERBEROS')) {
router.transitionTo('main.dashboard.index');
}
},
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/templates/main/admin/kerberos.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/kerberos.hbs b/ambari-web/app/templates/main/admin/kerberos.hbs
index 4fc97d1..4f35fd5 100644
--- a/ambari-web/app/templates/main/admin/kerberos.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos.hbs
@@ -20,23 +20,21 @@
<div>
<p class="text-success">{{t admin.security.enabled}}
{{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
- {{#if App.supports.enableToggleKerberos}}
- <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
- {{#unless isManualKerberos}}
- <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
- <i class="icon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
- {{#if App.isCredentialStorePersistent}}
- <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
- {{/if}}
- {{/unless}}
- <br/>
- {{#unless isEditMode}}
- <a {{action makeConfigsEditable target="controller"}}
- class="pull-right edit-link">
- {{t common.edit}}
- </a>
- {{/unless}}
- {{/if}}
+ <button class="btn btn-padding btn-warning admin-disable-security-btn" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action notifySecurityOffPopup target="controller"}}>{{t admin.kerberos.button.disable}} </button>
+ {{#unless isManualKerberos}}
+ <button class="btn btn-success" id="regenerate-keytabs" {{bindAttr disabled="isKerberosButtonsDisabled"}} {{action regenerateKeytabs target="controller"}}>
+ <i class="icon-repeat"></i> {{t admin.kerberos.button.regenerateKeytabs}}</button>
+ {{#if App.isCredentialStorePersistent}}
+ <button class="btn btn-primary" {{action showManageKDCCredentialsPopup target="controller"}}>{{t admin.kerberos.credentials.store.menu.label}}</button>
+ {{/if}}
+ {{/unless}}
+ <br/>
+ {{#unless isEditMode}}
+ <a {{action makeConfigsEditable target="controller"}}
+ class="pull-right edit-link">
+ {{t common.edit}}
+ </a>
+ {{/unless}}
{{/isAuthorized}}
</p>
</div>
@@ -54,10 +52,8 @@
<div>
<p class="muted background-text">{{t admin.security.disabled}}
{{#isAuthorized "CLUSTER.TOGGLE_KERBEROS"}}
- {{#if App.supports.enableToggleKerberos}}
- <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
- <br/>
- {{/if}}
+ <a class="btn btn-padding btn-success admin-enable-security-btn" {{action checkAndStartKerberosWizard target="controller"}}>{{t admin.kerberos.button.enable}} </a>
+ <br/>
{{/isAuthorized}}
</p>
</div>
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/templates/main/service/all_services_actions.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/all_services_actions.hbs b/ambari-web/app/templates/main/service/all_services_actions.hbs
index 30901a9..0ca1cd5 100644
--- a/ambari-web/app/templates/main/service/all_services_actions.hbs
+++ b/ambari-web/app/templates/main/service/all_services_actions.hbs
@@ -24,14 +24,12 @@
</a>
<ul class="pull-left dropdown-menu">
{{#isAuthorized "SERVICE.ADD_DELETE_SERVICES"}}
- {{#if App.supports.enableAddDeleteServices}}
- <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled :add-service-button"}}>
- <a href="#"
- {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}
- {{action gotoAddService target="view.serviceController"}}>
- <i class="icon-plus icon-white"></i> {{t services.service.add}}</a>
- </li>
- {{/if}}
+ <li {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled :add-service-button"}}>
+ <a href="#"
+ {{bindAttr class="view.serviceController.isAllServicesInstalled:disabled"}}
+ {{action gotoAddService target="view.serviceController"}}>
+ <i class="icon-plus icon-white"></i> {{t services.service.add}}</a>
+ </li>
{{/isAuthorized}}
{{#isAuthorized "SERVICE.START_STOP"}}
<li class="divider"></li>
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/views/main/admin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin.js b/ambari-web/app/views/main/admin.js
index 05d0f56..509f380 100644
--- a/ambari-web/app/views/main/admin.js
+++ b/ambari-web/app/views/main/admin.js
@@ -39,14 +39,12 @@ App.MainAdminView = Em.View.extend({
});
}
if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || (App.get('upgradeInProgress') || App.get('upgradeHolding')) ) {
- if (App.supports.enableToggleKerberos) {
- items.push({
- name: 'kerberos',
- url: 'adminKerberos.index',
- label: Em.I18n.t('common.kerberos'),
- disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
- });
- }
+ items.push({
+ name: 'kerberos',
+ url: 'adminKerberos.index',
+ label: Em.I18n.t('common.kerberos'),
+ disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
+ });
}
if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
if (App.supports.serviceAutoStart) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
index 25efffe..f566814 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/services_view.js
@@ -56,7 +56,7 @@ App.MainAdminStackServicesView = Em.View.extend({
* @param event
*/
goToAddService: function (event) {
- if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES') || !App.supports.enableAddDeleteServices) {
+ if (!App.isAuthorized('SERVICE.ADD_DELETE_SERVICES')) {
return;
} else if (event.context == "KERBEROS") {
App.router.get('mainAdminKerberosController').checkAndStartKerberosWizard();
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/views/main/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/menu.js b/ambari-web/app/views/main/menu.js
index 06bb282..00b1c38 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -121,14 +121,12 @@ App.MainMenuView = Em.CollectionView.extend({
});
}
if (!App.get('isHadoopWindowsStack') && App.isAuthorized('CLUSTER.TOGGLE_KERBEROS') || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
- if (App.supports.enableToggleKerberos) {
- categories.push({
- name: 'kerberos',
- url: 'kerberos/',
- label: Em.I18n.t('common.kerberos'),
- disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
- });
- }
+ categories.push({
+ name: 'kerberos',
+ url: 'kerberos/',
+ label: Em.I18n.t('common.kerberos'),
+ disabled: App.get('upgradeInProgress') || App.get('upgradeHolding')
+ });
}
if ((App.isAuthorized('SERVICE.START_STOP, CLUSTER.MODIFY_CONFIGS') && App.isAuthorized('SERVICE.MANAGE_AUTO_START, CLUSTER.MANAGE_AUTO_START')) || (App.get('upgradeInProgress') || App.get('upgradeHolding'))) {
if (App.supports.serviceAutoStart) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/app/views/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index e9b1055..09457f2 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -288,7 +288,7 @@ App.MainServiceItemView = Em.View.extend({
options.push(actionMap.DOWNLOAD_CLIENT_CONFIGS);
}
- if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES") && App.supports.enableAddDeleteServices) {
+ if (App.isAuthorized("SERVICE.ADD_DELETE_SERVICES")) {
options.push(actionMap.DELETE_SERVICE);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/fe71b222/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
index da75cf2..70d182c 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/services_view_test.js
@@ -34,7 +34,6 @@ describe('App.MainAdminStackServicesView', function () {
sinon.stub(App.router, 'get').returns(mock);
sinon.spy(mock, 'checkAndStartKerberosWizard');
isAccessibleMock = sinon.stub(App, 'isAuthorized');
- App.set('supports.enableAddDeleteServices', true);
});
afterEach(function() {
App.get('router').transitionTo.restore();
[30/50] [abbrv] ambari git commit: AMBARI-21769. zeppelin principal
and livy.superusers property do not match on upgraded cluster from Ambari
2.4.2 and HDP 2.5.5 (rlevas)
Posted by ja...@apache.org.
AMBARI-21769. zeppelin principal and livy.superusers property do not match on upgraded cluster from Ambari 2.4.2 and HDP 2.5.5 (rlevas)
(cherry picked from commit 0ae0494dcc264dd2bbb9cb84bad4b13fdfa2a9ef)
Change-Id: Ieb349ad3f8c157cebffb87c4f2e7a4518f3803aa
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a4f77844
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a4f77844
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a4f77844
Branch: refs/heads/2.5-maint
Commit: a4f7784458d6ac8cf8ae569b2d656767d2dc5679
Parents: 8e7ec78
Author: Robert Levas <rl...@hortonworks.com>
Authored: Mon Aug 21 17:03:40 2017 -0400
Committer: rlevas@hortonworks.com <je...@hortonworks.com>
Committed: Mon Aug 21 21:21:26 2017 +0000
----------------------------------------------------------------------
.../kerberos/DeconstructedPrincipal.java | 2 +-
.../server/upgrade/UpgradeCatalog252.java | 168 ++++++-
.../stacks/HDP/2.5/services/SPARK/kerberos.json | 3 +-
.../stacks/HDP/2.5/services/stack_advisor.py | 58 ++-
.../stacks/HDP/2.6/services/SPARK/kerberos.json | 3 +-
.../HDP/2.6/services/SPARK2/kerberos.json | 3 +-
.../stacks/HDP/2.6/services/stack_advisor.py | 62 ++-
.../server/upgrade/UpgradeCatalog252Test.java | 198 ++++++++
.../stacks/2.5/common/test_stack_advisor.py | 197 ++++++++
.../stacks/2.6/common/test_stack_advisor.py | 471 +++++++++++++++++++
10 files changed, 1154 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/DeconstructedPrincipal.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/DeconstructedPrincipal.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/DeconstructedPrincipal.java
index f5d8156..d0c79ab 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/DeconstructedPrincipal.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/DeconstructedPrincipal.java
@@ -35,7 +35,7 @@ import java.util.regex.Pattern;
* This class will create a DeconstructedPrincipal from a String containing a principal using
* {@link DeconstructedPrincipal#valueOf(String, String)}
*/
-class DeconstructedPrincipal {
+public class DeconstructedPrincipal {
/**
* Regular expression to parse the different principal formats:
* <ul>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index ce1c8f1..0b422f8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,24 +20,33 @@ package org.apache.ambari.server.upgrade;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.TreeSet;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.orm.dao.ArtifactDAO;
import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.entities.ArtifactEntity;
import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.serveraction.kerberos.DeconstructedPrincipal;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptorContainer;
+import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
import org.apache.commons.lang.StringUtils;
import com.google.common.collect.Sets;
@@ -126,6 +135,8 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
resetStackToolsAndFeatures();
ensureConfigTypesHaveAtLeastOneVersionSelected();
updateMariaDBRedHatSupportHive();
+ updateKerberosDescriptorArtifacts();
+ fixLivySuperusers();
}
/**
@@ -328,4 +339,159 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
}
}
}
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
+ if (artifactEntity != null) {
+ Map<String, Object> data = artifactEntity.getArtifactData();
+
+ if (data != null) {
+ final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
+
+ if (kerberosDescriptor != null) {
+ // Find and remove configuration specifications for <code>livy-conf/livy.superusers</code>
+ // in SPARK since this logic has been moved to the relevant stack/service advisors
+ boolean updatedSpark = removeConfigurationSpecification(kerberosDescriptor.getService("SPARK"), "livy-conf", "livy.superusers");
+
+ // Find and remove configuration specifications for <code>livy2-conf/livy.superusers</code>
+ // in SPARK2 since this logic has been moved to the relevant stack/service advisors
+ boolean updatedSpark2 = removeConfigurationSpecification(kerberosDescriptor.getService("SPARK2"), "livy2-conf", "livy.superusers");
+
+ if (updatedSpark || updatedSpark2) {
+ artifactEntity.setArtifactData(kerberosDescriptor.toMap());
+ artifactDAO.merge(artifactEntity);
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Fixes the <code>livy.superusers</code> value in <code>livy-conf</code> and
+ * <code>livy2-conf</code>.
+ * <p>
+ * When Kerberos is enabled, the values of <code>livy.superusers</code> in <code>livy-conf</code>
+ * and <code>livy2-conf</code> are potentially incorrect due to an issue with the Spark and Spark2
+ * kerberos.json files. In Ambari 2.5.2, the logic to set <code>livy.superusers</code> has been
+ * moved to the stack advisor and removed from the kerberos.json files. The user-supplied Kerberos
+ * descriptor is fixed in {@link #updateKerberosDescriptorArtifact(ArtifactDAO, ArtifactEntity)}.
+ * <p>
+ * If Zeppelin is installed and Kerberos is enabled, then <code>livy.superusers</code> should be
+ * updated to contain the proper value for the Zeppelin user. If the incorrect value is there and
+ * in the form of <code>zeppelin-clustername</code> then it will be removed.
+ */
+ void fixLivySuperusers() throws AmbariException {
+ Clusters clusters = injector.getInstance(Clusters.class);
+ if (clusters != null) {
+ Map<String, Cluster> clusterMap = clusters.getClusters();
+
+ if (clusterMap != null && !clusterMap.isEmpty()) {
+ for (final Cluster cluster : clusterMap.values()) {
+ Config zeppelinEnvProperties = cluster.getDesiredConfigByType("zeppelin-env");
+ if (zeppelinEnvProperties != null) {
+ Map<String, String> zeppelinProperties = zeppelinEnvProperties.getProperties();
+ if (zeppelinProperties != null) {
+ String zeppelinPrincipal = zeppelinProperties.get("zeppelin.server.kerberos.principal");
+
+ if (!StringUtils.isEmpty(zeppelinPrincipal)) {
+ // Parse the principal name component from the full principal. The default realm of
+ // EXAMPLE.COM is used because we really don't care what the realm is.
+ DeconstructedPrincipal deconstructedPrincipal = DeconstructedPrincipal.valueOf(zeppelinPrincipal, "EXAMPLE.COM");
+ String newZeppelinPrincipalName = deconstructedPrincipal.getPrincipalName();
+ String oldZeppelinPrincipalName = "zeppelin-" + cluster.getClusterName().toLowerCase();
+
+ // Fix livy-conf/livy.superusers
+ updateListValues(cluster, "livy-conf", "livy.superusers",
+ Collections.singleton(newZeppelinPrincipalName), Collections.singleton(oldZeppelinPrincipalName));
+
+ // Fix livy2-conf/livy.superusers
+ updateListValues(cluster, "livy2-conf", "livy.superusers",
+ Collections.singleton(newZeppelinPrincipalName), Collections.singleton(oldZeppelinPrincipalName));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Updates the contents of a configuration with comma-delimited list of values.
+ * <p>
+ * Items will be added and/or removed as needed. If changes are made to the value, the configuration
+ * is updated in the cluster.
+ *
+ * @param cluster the cluster
+ * @param configType the configuration type
+ * @param propertyName the property name
+ * @param valuesToAdd a set of values to add to the list
+ * @param valuesToRemove a set of values to remove from the list
+ * @throws AmbariException
+ */
+ private void updateListValues(Cluster cluster, String configType, String propertyName, Set<String> valuesToAdd, Set<String> valuesToRemove)
+ throws AmbariException {
+ Config config = cluster.getDesiredConfigByType(configType);
+ if (config != null) {
+ Map<String, String> properties = config.getProperties();
+ if (properties != null) {
+ String existingValue = properties.get(propertyName);
+ String newValue = null;
+
+ if (StringUtils.isEmpty(existingValue)) {
+ if ((valuesToAdd != null) && !valuesToAdd.isEmpty()) {
+ newValue = StringUtils.join(valuesToAdd, ',');
+ }
+ } else {
+ Set<String> valueSet = new TreeSet<>(Arrays.asList(existingValue.split("\\s*,\\s*")));
+
+ boolean removedValues = false;
+ if (valuesToRemove != null) {
+ removedValues = valueSet.removeAll(valuesToRemove);
+ }
+
+ boolean addedValues = false;
+ if (valuesToAdd != null) {
+ addedValues = valueSet.addAll(valuesToAdd);
+ }
+
+ if (removedValues || addedValues) {
+ newValue = StringUtils.join(valueSet, ',');
+ }
+ }
+
+ if (!StringUtils.isEmpty(newValue)) {
+ updateConfigurationPropertiesForCluster(cluster, configType, Collections.singletonMap(propertyName, newValue), true, true);
+ }
+ }
+ }
+ }
+
+ /**
+ * Given an {@link AbstractKerberosDescriptorContainer}, attempts to remove the specified property
+ * (<code>configType/propertyName</code>) from it.
+ *
+ * @param kerberosDescriptorContainer the container to update
+ * @param configType the configuration type
+ * @param propertyName the property name
+ * @return true if changes were made to the container; false otherwise
+ */
+ private boolean removeConfigurationSpecification(AbstractKerberosDescriptorContainer kerberosDescriptorContainer, String configType, String propertyName) {
+ boolean updated = false;
+ if (kerberosDescriptorContainer != null) {
+ KerberosConfigurationDescriptor configurationDescriptor = kerberosDescriptorContainer.getConfiguration(configType);
+ if (configurationDescriptor != null) {
+ Map<String, String> properties = configurationDescriptor.getProperties();
+ if ((properties != null) && properties.containsKey(propertyName)) {
+ properties.remove(propertyName);
+ LOG.info("Removed {}/{} from the descriptor named {}", configType, propertyName, kerberosDescriptorContainer.getName());
+ updated = true;
+ }
+ }
+ }
+
+ return updated;
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
index aa81edb..54ddf02 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/SPARK/kerberos.json
@@ -43,8 +43,7 @@
{
"livy-conf": {
"livy.server.auth.type": "kerberos",
- "livy.impersonation.enabled": "true",
- "livy.superusers": "zeppelin${principal_suffix}"
+ "livy.impersonation.enabled": "true"
}
},
{
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 855d33ad..d4404617 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -443,11 +443,22 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
"RANGER_KMS": self.recommendRangerKMSConfigurations,
"STORM": self.recommendStormConfigurations,
"OOZIE": self.recommendOozieConfigurations,
- "SPARK2": self.recommendSpark2Configurations
+ "SPARK": self.recommendSparkConfigurations,
+ "SPARK2": self.recommendSpark2Configurations,
+ "ZEPPELIN": self.recommendZeppelinConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
+ def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
+ """
+ :type configurations dict
+ :type clusterData dict
+ :type services dict
+ :type hosts dict
+ """
+ self.__addZeppelinToLivySuperUsers(configurations, services)
+
def recommendSpark2Configurations(self, configurations, clusterData, services, hosts):
"""
:type configurations dict
@@ -520,6 +531,15 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
putStormSiteProperty('storm.cluster.metrics.consumer.register', 'null')
putStormSiteProperty('topology.metrics.consumer.register', 'null')
+ def recommendZeppelinConfigurations(self, configurations, clusterData, services, hosts):
+ """
+ :type configurations dict
+ :type clusterData dict
+ :type services dict
+ :type hosts dict
+ """
+ self.__addZeppelinToLivySuperUsers(configurations, services)
+
def constructAtlasRestAddress(self, services, hosts):
"""
:param services: Collection of services in the cluster with configs
@@ -2158,6 +2178,42 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
return self.toConfigurationValidationProblems(validationItems, "ranger-tagsync-site")
+ def __addZeppelinToLivySuperUsers(self, configurations, services):
+ """
+ If Kerberos is enabled AND Zeppelin is installed and Spark Livy Server is installed, then set
+ livy-conf/livy.superusers to contain the Zeppelin principal name from
+ zeppelin-env/zeppelin.server.kerberos.principal
+
+ :param configurations:
+ :param services:
+ """
+ if self.isSecurityEnabled(services):
+ zeppelin_env = self.getServicesSiteProperties(services, "zeppelin-env")
+
+ if zeppelin_env and 'zeppelin.server.kerberos.principal' in zeppelin_env:
+ zeppelin_principal = zeppelin_env['zeppelin.server.kerberos.principal']
+ zeppelin_user = zeppelin_principal.split('@')[0] if zeppelin_principal else None
+
+ if zeppelin_user:
+ livy_conf = self.getServicesSiteProperties(services, 'livy-conf')
+
+ if livy_conf:
+ superusers = livy_conf['livy.superusers'] if livy_conf and 'livy.superusers' in livy_conf else None
+
+ # add the Zeppelin user to the set of users
+ if superusers:
+ _superusers = superusers.split(',')
+ _superusers = [x.strip() for x in _superusers]
+ _superusers = filter(None, _superusers) # Removes empty string elements from array
+ else:
+ _superusers = []
+
+ if zeppelin_user not in _superusers:
+ _superusers.append(zeppelin_user)
+
+ putLivyProperty = self.putProperty(configurations, 'livy-conf', services)
+ putLivyProperty('livy.superusers', ','.join(_superusers))
+
def __isServiceDeployed(self, services, serviceName):
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
return serviceName in servicesList
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/kerberos.json
index 872f78b..c68d627 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK/kerberos.json
@@ -43,8 +43,7 @@
{
"livy-conf": {
"livy.server.auth.type": "kerberos",
- "livy.impersonation.enabled": "true",
- "livy.superusers": "zeppelin${principal_suffix}"
+ "livy.impersonation.enabled": "true"
}
},
{
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/kerberos.json
index 0f99bbb..8b6b41d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/SPARK2/kerberos.json
@@ -43,8 +43,7 @@
{
"livy2-conf": {
"livy.server.auth.type": "kerberos",
- "livy.impersonation.enabled": "true",
- "livy.superusers": "zeppelin${principal_suffix}"
+ "livy.impersonation.enabled": "true"
}
},
{
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 577e4f5..23ee0ca 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -42,7 +42,9 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
"HBASE": self.recommendHBASEConfigurations,
"YARN": self.recommendYARNConfigurations,
"KAFKA": self.recommendKAFKAConfigurations,
- "STORM": self.recommendSTORMConfigurations
+ "STORM": self.recommendSTORMConfigurations,
+ "SPARK2": self.recommendSPARK2Configurations,
+ "ZEPPELIN": self.recommendZEPPELINConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
@@ -94,7 +96,27 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
else:
putStormSiteProperty("nimbus.credential.renewers.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
putStormSiteProperty("nimbus.credential.renewers.freq.secs", "82800")
- pass
+ pass
+
+ def recommendSPARK2Configurations(self, configurations, clusterData, services, hosts):
+ """
+ :type configurations dict
+ :type clusterData dict
+ :type services dict
+ :type hosts dict
+ """
+ super(HDP26StackAdvisor, self).recommendSpark2Configurations(configurations, clusterData, services, hosts)
+ self.__addZeppelinToLivy2SuperUsers(configurations, services)
+
+ def recommendZEPPELINConfigurations(self, configurations, clusterData, services, hosts):
+ """
+ :type configurations dict
+ :type clusterData dict
+ :type services dict
+ :type hosts dict
+ """
+ super(HDP26StackAdvisor, self).recommendZeppelinConfigurations(configurations, clusterData, services, hosts)
+ self.__addZeppelinToLivy2SuperUsers(configurations, services)
def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
@@ -648,3 +670,39 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
putRangerKafkaPluginProperty("REPOSITORY_CONFIG_USERNAME",kafka_user)
else:
Logger.info("Not setting Kafka Repo user for Ranger.")
+
+ def __addZeppelinToLivy2SuperUsers(self, configurations, services):
+ """
+ If Kerberos is enabled AND Zeppelin is installed AND Spark2 Livy Server is installed, then set
+ livy2-conf/livy.superusers to contain the Zeppelin principal name from
+ zeppelin-env/zeppelin.server.kerberos.principal
+
+ :param configurations:
+ :param services:
+ """
+ if self.isSecurityEnabled(services):
+ zeppelin_env = self.getServicesSiteProperties(services, "zeppelin-env")
+
+ if zeppelin_env and 'zeppelin.server.kerberos.principal' in zeppelin_env:
+ zeppelin_principal = zeppelin_env['zeppelin.server.kerberos.principal']
+ zeppelin_user = zeppelin_principal.split('@')[0] if zeppelin_principal else None
+
+ if zeppelin_user:
+ livy2_conf = self.getServicesSiteProperties(services, 'livy2-conf')
+
+ if livy2_conf:
+ superusers = livy2_conf['livy.superusers'] if livy2_conf and 'livy.superusers' in livy2_conf else None
+
+ # add the Zeppelin user to the set of users
+ if superusers:
+ _superusers = superusers.split(',')
+ _superusers = [x.strip() for x in _superusers]
+ _superusers = filter(None, _superusers) # Removes empty string elements from array
+ else:
+ _superusers = []
+
+ if zeppelin_user not in _superusers:
+ _superusers.append(zeppelin_user)
+
+ putLivy2ConfProperty = self.putProperty(configurations, 'livy2-conf', services)
+ putLivy2ConfProperty('livy.superusers', ','.join(_superusers))
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
index b71b335..72956eb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
@@ -19,30 +19,43 @@
package org.apache.ambari.server.upgrade;
import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.anyString;
import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.newCapture;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.reset;
import static org.easymock.EasyMock.verify;
+import java.lang.reflect.Type;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
import javax.persistence.EntityManager;
+import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.ActionManager;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.controller.KerberosHelper;
import org.apache.ambari.server.controller.MaintenanceStateHelper;
import org.apache.ambari.server.orm.DBAccessor;
import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.orm.dao.ArtifactDAO;
+import org.apache.ambari.server.orm.entities.ArtifactEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
import org.apache.ambari.server.state.stack.OsFamily;
import org.easymock.Capture;
import org.easymock.EasyMockRunner;
@@ -55,6 +68,7 @@ import org.junit.Test;
import org.junit.runner.RunWith;
import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
@@ -163,4 +177,188 @@ public class UpgradeCatalog252Test {
Assert.assertEquals(0, captured.getDefaultValue());
Assert.assertEquals(Short.class, captured.getType());
}
+
+ @Test
+ public void testFixLivySuperUsers() throws AmbariException {
+
+ final Clusters clusters = createMock(Clusters.class);
+ final Cluster cluster = createMock(Cluster.class);
+ final Config zeppelinEnv = createMock(Config.class);
+ final Config livyConf = createMock(Config.class);
+ final Config livyConfNew = createMock(Config.class);
+ final Config livy2Conf = createMock(Config.class);
+ final Config livy2ConfNew = createMock(Config.class);
+ final AmbariManagementController controller = createMock(AmbariManagementController.class);
+
+ Capture<? extends Map<String, String>> captureLivyConfProperties = newCapture();
+ Capture<? extends Map<String, String>> captureLivy2ConfProperties = newCapture();
+
+ Module module = new Module() {
+ @Override
+ public void configure(Binder binder) {
+ binder.bind(DBAccessor.class).toInstance(dbAccessor);
+ binder.bind(OsFamily.class).toInstance(osFamily);
+ binder.bind(EntityManager.class).toInstance(entityManager);
+ binder.bind(Configuration.class).toInstance(configuration);
+ binder.bind(Clusters.class).toInstance(clusters);
+ binder.bind(AmbariManagementController.class).toInstance(controller);
+ }
+ };
+
+ expect(clusters.getClusters()).andReturn(Collections.singletonMap("c1", cluster)).once();
+
+ expect(cluster.getClusterName()).andReturn("c1").atLeastOnce();
+ expect(cluster.getDesiredConfigByType("zeppelin-env")).andReturn(zeppelinEnv).atLeastOnce();
+ expect(cluster.getServiceByConfigType("livy-conf")).andReturn("SPARK").atLeastOnce();
+ expect(cluster.getDesiredConfigByType("livy-conf")).andReturn(livyConf).atLeastOnce();
+ expect(cluster.getConfigsByType("livy-conf")).andReturn(Collections.singletonMap("tag1", livyConf)).atLeastOnce();
+ expect(cluster.getConfig(eq("livy-conf"), anyString())).andReturn(livyConfNew).atLeastOnce();
+ expect(cluster.getServiceByConfigType("livy2-conf")).andReturn("SPARK2").atLeastOnce();
+ expect(cluster.getDesiredConfigByType("livy2-conf")).andReturn(livy2Conf).atLeastOnce();
+ expect(cluster.getConfigsByType("livy2-conf")).andReturn(Collections.singletonMap("tag1", livy2Conf)).atLeastOnce();
+ expect(cluster.getConfig(eq("livy2-conf"), anyString())).andReturn(livy2ConfNew).atLeastOnce();
+ expect(cluster.addDesiredConfig(eq("ambari-upgrade"), anyObject(Set.class))).andReturn(null).atLeastOnce();
+
+ expect(zeppelinEnv.getProperties()).andReturn(Collections.singletonMap("zeppelin.server.kerberos.principal", "zeppelin_user@AMBARI.LOCAL")).once();
+
+ expect(livyConf.getProperties()).andReturn(Collections.singletonMap("livy.superusers", "zeppelin-c1, some_user")).atLeastOnce();
+ expect(livyConf.getPropertiesAttributes()).andReturn(Collections.<String, Map<String, String>>emptyMap()).atLeastOnce();
+ expect(livy2Conf.getProperties()).andReturn(Collections.<String, String>emptyMap()).atLeastOnce();
+ expect(livy2Conf.getPropertiesAttributes()).andReturn(Collections.<String, Map<String, String>>emptyMap()).atLeastOnce();
+
+ expect(controller.createConfig(eq(cluster), eq("livy-conf"), capture(captureLivyConfProperties), anyString(), anyObject(Map.class)))
+ .andReturn(livyConfNew)
+ .once();
+ expect(controller.createConfig(eq(cluster), eq("livy2-conf"), capture(captureLivy2ConfProperties), anyString(), anyObject(Map.class)))
+ .andReturn(livy2ConfNew)
+ .once();
+
+ replay(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller);
+
+ Injector injector = Guice.createInjector(module);
+ UpgradeCatalog252 upgradeCatalog252 = injector.getInstance(UpgradeCatalog252.class);
+ upgradeCatalog252.fixLivySuperusers();
+
+ verify(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller);
+
+ Assert.assertTrue(captureLivyConfProperties.hasCaptured());
+ Assert.assertEquals("some_user,zeppelin_user", captureLivyConfProperties.getValue().get("livy.superusers"));
+
+ Assert.assertTrue(captureLivy2ConfProperties.hasCaptured());
+ Assert.assertEquals("zeppelin_user", captureLivy2ConfProperties.getValue().get("livy.superusers"));
+ }
+
+ @Test
+ public void testUpdateKerberosDescriptorArtifact() throws AmbariException {
+ String initialJson = "{" +
+ " \"services\": [" +
+ " {" +
+ " \"name\": \"SPARK\"," +
+ " \"configurations\": [" +
+ " {" +
+ " \"livy-conf\": {" +
+ " \"property1\": \"true\"," +
+ " \"property2\": \"true\"," +
+ " \"livy.superusers\": \"somevalue\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"some-env\": {" +
+ " \"groups\": \"${hadoop-env/proxyuser_group}\"," +
+ " \"hosts\": \"${clusterHostInfo/existing_service_master_hosts}\"" +
+ " }" +
+ " }" +
+ " ]" +
+ " }," +
+ " {" +
+ " \"name\": \"SPARK2\"," +
+ " \"configurations\": [" +
+ " {" +
+ " \"livy2-conf\": {" +
+ " \"property1\": \"true\"," +
+ " \"property2\": \"true\"," +
+ " \"livy.superusers\": \"somevalue\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"some2-env\": {" +
+ " \"groups\": \"${hadoop-env/proxyuser_group}\"," +
+ " \"hosts\": \"${clusterHostInfo/existing_service_master_hosts}\"" +
+ " }" +
+ " }" +
+ " ]" +
+ " }," +
+ " {" +
+ " \"name\": \"NOT_SPARK\"," +
+ " \"configurations\": [" +
+ " {" +
+ " \"not-livy-conf\": {" +
+ " \"property1\": \"true\"," +
+ " \"property2\": \"true\"," +
+ " \"livy.superusers\": \"somevalue\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"some2-env\": {" +
+ " \"groups\": \"${hadoop-env/proxyuser_group}\"," +
+ " \"hosts\": \"${clusterHostInfo/existing_service_master_hosts}\"" +
+ " }" +
+ " }" +
+ " ]" +
+ " }" +
+ " ]" +
+ "}";
+
+
+ Type type = new TypeToken<Map<String, Object>>() {
+ }.getType();
+ Map<String,Object> map = new Gson().fromJson(initialJson, type);
+
+ Module module = new Module() {
+ @Override
+ public void configure(Binder binder) {
+ binder.bind(DBAccessor.class).toInstance(dbAccessor);
+ binder.bind(OsFamily.class).toInstance(osFamily);
+ binder.bind(EntityManager.class).toInstance(entityManager);
+ binder.bind(Configuration.class).toInstance(configuration);
+ }
+ };
+
+ Capture<? extends Map<String, Object>> captureMap = newCapture();
+ ArtifactEntity artifactEntity = createMock(ArtifactEntity.class);
+
+ expect(artifactEntity.getArtifactData()).andReturn(map).once();
+ artifactEntity.setArtifactData(capture(captureMap));
+ expectLastCall().once();
+
+ ArtifactDAO artifactDAO = createMock(ArtifactDAO.class);
+ expect(artifactDAO.merge(artifactEntity)).andReturn(artifactEntity).once();
+
+ replay(artifactDAO, artifactEntity);
+
+ Injector injector = Guice.createInjector(module);
+ UpgradeCatalog252 upgradeCatalog252 = injector.getInstance(UpgradeCatalog252.class);
+ upgradeCatalog252.updateKerberosDescriptorArtifact(artifactDAO, artifactEntity);
+
+ verify(artifactDAO, artifactEntity);
+
+ Assert.assertTrue(captureMap.hasCaptured());
+
+ KerberosDescriptor result = new KerberosDescriptorFactory().createInstance(captureMap.getValue());
+
+ Assert.assertNotNull(result.getService("SPARK"));
+ Assert.assertNotNull(result.getService("SPARK").getConfiguration("livy-conf"));
+ Assert.assertNotNull(result.getService("SPARK").getConfiguration("livy-conf").getProperties());
+ Assert.assertFalse(result.getService("SPARK").getConfiguration("livy-conf").getProperties().containsKey("livy.superusers"));
+
+ Assert.assertNotNull(result.getService("SPARK2"));
+ Assert.assertNotNull(result.getService("SPARK2").getConfiguration("livy2-conf"));
+ Assert.assertNotNull(result.getService("SPARK2").getConfiguration("livy2-conf").getProperties());
+ Assert.assertFalse(result.getService("SPARK2").getConfiguration("livy2-conf").getProperties().containsKey("livy.superusers"));
+
+ Assert.assertNotNull(result.getService("NOT_SPARK"));
+ Assert.assertNotNull(result.getService("NOT_SPARK").getConfiguration("not-livy-conf"));
+ Assert.assertNotNull(result.getService("NOT_SPARK").getConfiguration("not-livy-conf").getProperties());
+ Assert.assertTrue(result.getService("NOT_SPARK").getConfiguration("not-livy-conf").getProperties().containsKey("livy.superusers"));
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 9f75f9d..a04b4cd 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -509,6 +509,203 @@ class TestHDP25StackAdvisor(TestCase):
self.expected_visibility_false = {'visible': 'false'}
self.expected_visibility_true = {'visible': 'true'}
+ def test_recommendSPARKConfigurations_SecurityEnabledZeppelinInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.superusers": "zeppelin_user",
+ "livy.property1": "value1"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSparkConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendSPARKConfigurations_SecurityNotEnabledZeppelinInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "false",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "false",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSparkConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendSPARKConfigurations_SecurityEnabledZeppelinInstalledExistingValue(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.superusers": "livy_user"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.superusers": "livy_user,zeppelin_user"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSparkConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendSPARKConfigurations_SecurityEnabledZeppelinNotInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSparkConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
def test_recommendSPARK2Configurations(self):
configurations = {}
services = {"configurations": configurations}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a4f77844/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 3ba18d8..e88ec8e 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -204,6 +204,477 @@ class TestHDP26StackAdvisor(TestCase):
'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
)
+ def test_recommendSPARK2Configurations_SecurityEnabledZeppelinInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK2"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.superusers": "zeppelin_user",
+ "livy.property1": "value1"
+ }
+ },
+ "spark2-defaults": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ },
+ "spark2-thrift-sparkconf": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSPARK2Configurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendSPARK2Configurations_SecurityNotEnabledZeppelinInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "false",
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK2"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "false",
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ },
+ "spark2-defaults": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ },
+ "spark2-thrift-sparkconf": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSPARK2Configurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendSPARK2Configurations_SecurityEnabledZeppelinInstalledExistingValue(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1",
+ "livy.superusers": "livy_user"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK2"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1",
+ "livy.superusers": "livy_user,zeppelin_user"
+ }
+ },
+ "spark2-defaults": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ },
+ "spark2-thrift-sparkconf": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSPARK2Configurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendSPARK2Configurations_SecurityEnabledZeppelinNotInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "SPARK2"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true"
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ },
+ "spark2-defaults": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ },
+ "spark2-thrift-sparkconf": {
+ "properties": {
+ "spark.yarn.queue": "default"
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendSPARK2Configurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendZEPPELINConfigurations_SecurityEnabledSPARKInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.property1": "value1"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "ZEPPELIN"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.superusers": "zeppelin_user",
+ "livy.property1": "value1"
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.superusers": "zeppelin_user",
+ "livy.property1": "value1"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendZEPPELINConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendZEPPELINConfigurations_SecurityNotEnabledSparkInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "false",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "ZEPPELIN"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "false",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendZEPPELINConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendZEPPELINConfigurations_SecurityEnabledZeppelinInstalledExistingValue(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.superusers": "livy_user, hdfs"
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.superusers": "livy2_user"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "ZEPPELIN"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ }
+ },
+ "livy-conf": {
+ "properties": {
+ "livy.superusers": "livy_user,hdfs,zeppelin_user"
+ }
+ },
+ "livy2-conf": {
+ "properties": {
+ "livy.superusers": "livy2_user,zeppelin_user"
+ }
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendZEPPELINConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
+ def test_recommendZEPPELINConfigurations_SecurityEnabledSparkNotInstalled(self):
+ configurations = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ }
+ services = {"configurations": configurations}
+ services['services'] = [
+ {
+ "StackServices": {
+ "service_name": "ZEPPELIN"
+ },
+ }
+ ]
+ clusterData = {
+ "cpu": 4,
+ "containers": 5,
+ "ramPerContainer": 256,
+ "yarnMinContainerSize": 256
+ }
+ expected = {
+ "cluster-env": {
+ "properties": {
+ "security_enabled": "true",
+ },
+ "zeppelin-env": {
+ "properties": {
+ "zeppelin.server.kerberos.principal": "zeppelin_user@REALM"
+ }
+ }
+ }
+ }
+
+ self.stackAdvisor.recommendZEPPELINConfigurations(configurations, clusterData, services, None)
+ self.assertEquals(configurations, expected)
+
def test_recommendDruidConfigurations_WithPostgresql(self):
hosts = {
"items": [
[04/50] [abbrv] ambari git commit: AMBARI-21705 : Metrics Collector
start failed due to 'Unable to initialize HA controller'. (avijayan)
Posted by ja...@apache.org.
AMBARI-21705 : Metrics Collector start failed due to 'Unable to initialize HA controller'. (avijayan)
(cherry picked from commit bf51a82c9cf82d0cdda2a748d8102cffd73a8246)
Change-Id: I391880c3eaceeb476c7b1e0ba63d71d3a359d83e
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7ced0c9b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7ced0c9b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7ced0c9b
Branch: refs/heads/2.5-maint
Commit: 7ced0c9be4776443b83c5bf2bccb8a5ef599fd5b
Parents: 117f4f3
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Thu Aug 10 16:32:19 2017 -0700
Committer: avijayan@hortonworks.com <je...@hortonworks.com>
Committed: Thu Aug 10 23:56:38 2017 +0000
----------------------------------------------------------------------
.../metrics/timeline/availability/MetricCollectorHAController.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/7ced0c9b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
index addb14e..a06f4e8 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
@@ -138,8 +138,8 @@ public class MetricCollectorHAController {
if (CollectionUtils.isEmpty(nodes) || !nodes.contains(instanceConfig.getInstanceName())) {
LOG.info("Adding participant instance " + instanceConfig);
admin.addInstance(clusterName, instanceConfig);
- success = true;
}
+ success = true;
} catch (HelixException | ZkNoNodeException ex) {
LOG.warn("Helix Cluster not yet setup fully.");
if (i < tries - 1) {
[41/50] [abbrv] ambari git commit: AMBARI-21800. Some services had
issue coming up after express upgrade on IOP clusters. (swagle)
Posted by ja...@apache.org.
AMBARI-21800. Some services had issue coming up after express upgrade on IOP clusters. (swagle)
Change-Id: Ifc3ebf613fca4712c2c87110706033dd24cf704b
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b41f06f7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b41f06f7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b41f06f7
Branch: refs/heads/2.5-maint
Commit: b41f06f7b126cf5beb4e1c9504aafe9157c2507a
Parents: f5dc535
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Aug 23 19:34:34 2017 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Aug 23 19:34:34 2017 -0700
----------------------------------------------------------------------
.../apache/ambari/server/metadata/RoleCommandOrder.java | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/b41f06f7/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
index 234ef26..ef9e1fa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
@@ -133,7 +133,7 @@ public class RoleCommandOrder implements Cloneable {
this.sectionKeys = sectionKeys;
dependencies.clear();
- StackId stackId = cluster.getCurrentStackVersion();
+ StackId stackId = cluster.getDesiredStackVersion();
StackInfo stack = null;
try {
stack = ambariMetaInfo.getStack(stackId.getStackName(),
@@ -141,6 +141,8 @@ public class RoleCommandOrder implements Cloneable {
} catch (AmbariException e) {
}
+ LOG.info("Generating RCO graph for cluster {} and stack {}", cluster.getClusterName(), stackId);
+
Map<String,Object> userData = stack.getRoleCommandOrder().getContent();
Map<String,Object> generalSection =
(Map<String, Object>) userData.get(GENERAL_DEPS_KEY);
@@ -266,10 +268,9 @@ public class RoleCommandOrder implements Cloneable {
roleCommandDeps.add(new RoleCommandPair(rco.getRole(), RoleCommand.RESTART));
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding dependency for " + restartPair + ", " +
- "dependencies => " + roleCommandDeps);
- }
+ LOG.info("Adding dependency for " + restartPair + ", " +
+ "dependencies => " + roleCommandDeps);
+
missingDependencies.put(restartPair, roleCommandDeps);
}
}
[13/50] [abbrv] ambari git commit: AMBARI-21045. Enable Storm's
AutoTGT configs in secure mode (Sriharsha Chintalapani via smohanty)""
Posted by ja...@apache.org.
AMBARI-21045. Enable Storm's AutoTGT configs in secure mode (Sriharsha Chintalapani via smohanty)""
This reverts commit 96e5ea566037e7874e815000966495145974f69c.
(cherry picked from commit d7b2d856632fa2614ba2d2b153c8717ed4390d66)
Change-Id: I769c8b159d212e66b37726bb971a0fa150b4a513
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4f101bde
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4f101bde
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4f101bde
Branch: refs/heads/2.5-maint
Commit: 4f101bde42939e2fd4b0b5b28a4be0e60ea5a08c
Parents: 5ec0382
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Aug 15 16:58:33 2017 -0700
Committer: smohanty@hortonworks.com <je...@hortonworks.com>
Committed: Wed Aug 16 00:41:10 2017 +0000
----------------------------------------------------------------------
.../STORM/1.1.0/configuration/storm-env.xml | 54 ++++++++++++
.../STORM/1.1.0/configuration/storm-site.xml | 48 -----------
.../stacks/HDP/2.3/upgrades/config-upgrade.xml | 7 ++
.../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.3/upgrades/upgrade-2.6.xml | 1 +
.../stacks/HDP/2.4/upgrades/config-upgrade.xml | 7 ++
.../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.4/upgrades/upgrade-2.6.xml | 2 +
.../stacks/HDP/2.5/upgrades/config-upgrade.xml | 7 ++
.../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.5/upgrades/upgrade-2.6.xml | 1 +
.../stacks/HDP/2.6/services/stack_advisor.py | 87 +++++++++++++++++++-
.../stacks/HDP/2.6/upgrades/config-upgrade.xml | 6 ++
.../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.6/upgrades/upgrade-2.6.xml | 1 +
.../src/main/resources/stacks/stack_advisor.py | 18 ++++
16 files changed, 204 insertions(+), 51 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
new file mode 100644
index 0000000..2ce560d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+ <!-- storm-env.sh -->
+ <property>
+ <name>content</name>
+ <display-name>storm-env template</display-name>
+ <description>This is the jinja template for storm-env.sh file</description>
+ <value>
+#!/bin/bash
+
+# Set Storm specific environment variables here.
+
+# The java implementation to use.
+export JAVA_HOME={{java64_home}}
+
+export STORM_CONF_DIR={{conf_dir}}
+export STORM_HOME={{storm_component_home_dir}}
+export STORM_JAR_JVM_OPTS={{jar_jvm_opts}}
+
+#set storm-auto creds
+# check if storm_jaas.conf in config , only enable storm_auto_creds in secure mode.
+STORM_JAAS_CONF=$STORM_HOME/conf/storm_jaas.conf
+STORM_AUTOCREDS_LIB_DIR=$STORM_HOME/external/storm-autocreds
+
+if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then
+ export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR
+fi
+ </value>
+ <value-attributes>
+ <type>content</type>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
deleted file mode 100644
index b2e9acb..0000000
--- a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
- <property>
- <name>nimbus.impersonation.acl</name>
- <description>
- The ImpersonationAuthorizer uses nimbus.impersonation.acl as the acl to authorize users. Following is a sample nimbus config for supporting impersonation:
- nimbus.impersonation.acl:
- impersonating_user1:
- hosts:
- [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
- groups:
- [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
- impersonating_user2:
- hosts:
- [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
- groups:
- [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
- </description>
- <!-- The depends-on section is new in this version, required so Stack Advisor can include streamline-env and streamline-common configs. -->
- <depends-on>
- <property>
- <type>streamline-env</type>
- <name>streamline_principal_name</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index d8e2d7d..0c4106e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -664,6 +664,13 @@
replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
+
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index ca3c193..c45c7c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -638,6 +638,10 @@
<task xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm">
<!-- Remove Atlas configs that were incorrectly added to storm-site instead of Atlas' application.properties. -->
<task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index cf2c257..69be4dc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -1130,6 +1130,7 @@
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
<task xsi:type="configure" id="storm_worker_log4j_directory" />
+ <task xsi:type="configure" id="storm_nimbus_autocred_config" />
</pre-upgrade>
<pre-downgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index 83a0a1e..3e62fb1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -483,6 +483,13 @@
<replace key="content" find="${sys:storm.log.dir}/${sys:logfile.name}"
replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
+
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 6e899ee..b6f48fc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -563,6 +563,10 @@
<task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<execute-stage service="STORM" component="NIMBUS" title="Apply security changes for Storm">
<!-- Add nimbus.impersonation acls . -->
<task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 4e14e83..f1d34e7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -1092,6 +1092,8 @@
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
<task xsi:type="configure" id="storm_worker_log4j_directory" />
+ <task xsi:type="configure" id="storm_nimbus_autocred_config" />
+
</pre-upgrade>
<pre-downgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index f227bac..8539006 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -63,6 +63,13 @@
<regex-replace key="content" find="A1" immediateFlush="false"
 fileName="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
 filePattern="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>\$\{pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="(?:[0-9]+) MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="([0-9]+)"
replace-with="A1" immediateFlush="false"
 fileName="${sys:storm.log.dir}/${sys:logfile.name}"
 filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>${pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
+
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index ce10e8b..ef4c6f4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -416,6 +416,10 @@
<task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<!--RANGER-->
<execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
<task xsi:type="configure" id="admin_log4j_parameterize">
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 840b17d..d9675f1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -1030,6 +1030,7 @@
<pre-upgrade>
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
+ <task xsi:type="configure" id="storm_nimbus_autocred_config" />
</pre-upgrade>
<pre-downgrade/>
<upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index cc5fa92..974b10c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -41,11 +41,92 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
"HIVE": self.recommendHIVEConfigurations,
"HBASE": self.recommendHBASEConfigurations,
"YARN": self.recommendYARNConfigurations,
- "KAFKA": self.recommendKAFKAConfigurations
+ "KAFKA": self.recommendKAFKAConfigurations,
+ "BEACON": self.recommendBEACONConfigurations,
+ "STORM": self.recommendSTORMConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
+ def recommendSTORMConfigurations(self, configurations, clusterData, services, hosts):
+ """
+ In HDF-2.6.1 we introduced a new way of doing Auto Credentials with services such as
+ HDFS, HIVE, HBASE. This method will update the required configs for autocreds if the users installs
+ STREAMLINE service.
+ """
+ super(HDP26StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
+ storm_site = self.getServicesSiteProperties(services, "storm-site")
+ storm_env = self.getServicesSiteProperties(services, "storm-env")
+ putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+ putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+ security_enabled = self.isSecurityEnabled(services)
+ servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+ if storm_env and storm_site and security_enabled and 'STREAMLINE' in servicesList:
+ storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"] if "nimbus.impersonation.acl" in storm_site else None
+ streamline_env = self.getServicesSiteProperties(services, "streamline-env")
+ _streamline_principal_name = streamline_env['streamline_principal_name'] if 'streamline_principal_name' in streamline_env else None
+ if _streamline_principal_name is not None and storm_nimbus_impersonation_acl is not None:
+ streamline_bare_principal = get_bare_principal(_streamline_principal_name)
+ storm_nimbus_impersonation_acl.replace('{{streamline_bare_principal}}', streamline_bare_principal)
+ putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
+
+ storm_nimbus_autocred_plugin_classes = storm_site["nimbus.autocredential.plugins.classes"] if "nimbus.autocredential.plugins.classes" in storm_site else None
+ if storm_nimbus_autocred_plugin_classes is not None:
+ new_storm_nimbus_autocred_plugin_classes = ['org.apache.storm.hdfs.security.AutoHDFS',
+ 'org.apache.storm.hbase.security.AutoHBase',
+ 'org.apache.storm.hive.security.AutoHive']
+ new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_autocred_plugin_classes,
+ new_storm_nimbus_autocred_plugin_classes)
+
+ putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
+ else:
+ putStormSiteProperty("nimbus.autocredential.plugins.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
+
+
+ storm_nimbus_credential_renewer_classes = storm_site["nimbus.credential.renewers.classes"] if "nimbus.credential.renewers.classes" in storm_site else None
+ if storm_nimbus_credential_renewer_classes is not None:
+ new_storm_nimbus_credential_renewer_classes_array = ['org.apache.storm.hdfs.security.AutoHDFS',
+ 'org.apache.storm.hbase.security.AutoHBase',
+ 'org.apache.storm.hive.security.AutoHive']
+ new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_credential_renewer_classes,
+ new_storm_nimbus_credential_renewer_classes_array)
+ putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
+ else:
+ putStormSiteProperty("nimbus.credential.renewers.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
+ putStormSiteProperty("nimbus.credential.renewers.freq.secs", "82800")
+ pass
+
+ def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
+ beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
+ putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
+
+ # database URL and driver class recommendations
+ if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+ putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
+ if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+ beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
+ beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
+ protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
+ oldSchemaName = getOldValue(self, services, "beacon-env", "beacon_store_db_name")
+ oldDBType = getOldValue(self, services, "beacon-env", "beacon_database")
+ # under these if constructions we are checking if beacon server hostname available,
+ # if it's default db connection url with "localhost" or if schema name was changed or if db type was changed (only for db type change from default mysql to existing mysql)
+ # or if protocol according to current db type differs with protocol in db connection url(other db types changes)
+ if beaconServerHost is not None:
+ if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
+ dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
+ putbeaconEnvProperty('beacon_store_url', dbConnection)
+
+ def getDBConnectionStringBeacon(self, databaseType):
+ driverDict = {
+ 'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
+ 'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
+ 'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
+ 'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
+ }
+ return driverDict.get(databaseType.upper())
+
def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
@@ -401,9 +482,9 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
propertyValue = "https://"+webapp_address+"/ws/v1/applicationhistory"
Logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
if services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"] != propertyValue:
- validationItems.append(
+ validationItems = [
{"config-name": "yarn.log.server.web-service.url",
- "item": self.getWarnItem("Value should be %s" % propertyValue)})
+ "item": self.getWarnItem("Value should be %s" % propertyValue)}]
return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def validateDruidHistoricalConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index c2c1532..3881137 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -111,6 +111,12 @@
<set key="ranger.plugin.storm.ambari.cluster.name" value="{{cluster_name}}"
if-type="ranger-storm-plugin-properties" if-key="ranger-storm-plugin-enabled" if-key-state="present"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index df609cd..6d8821b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -328,6 +328,10 @@
<task xsi:type="configure" id="hdp_2_6_maint_ranger_storm_plugin_cluster_name"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<!-- YARN -->
<execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Ranger Yarn plugin">
<task xsi:type="configure" id="hdp_2_6_maint_ranger_yarn_plugin_cluster_name"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index b376fa7..07f0960 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -965,6 +965,7 @@
<component name="NIMBUS">
<pre-upgrade>
<task xsi:type="configure" id="hdp_2_6_maint_ranger_storm_plugin_cluster_name"/>
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
</pre-upgrade>
<pre-downgrade/> <!-- no-op to prevent config changes on downgrade -->
<upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4f101bde/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index ccbb676..8ccbaba 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -1375,3 +1375,21 @@ class DefaultStackAdvisor(StackAdvisor):
if recommendation:
put_f(name, ",".join(recommendation))
+
+ @classmethod
+ def appendToYamlString(cls, yaml_string, list_classes):
+ updated_yaml_string = ""
+ try:
+ strip_yaml_str = re.sub('[\[\]\']', ' ', yaml_string)
+ klass_array = [x.strip() for x in strip_yaml_str.split(',')]
+ if yaml_string:
+ for klass in list_classes:
+ klass = klass.strip()
+ klass_array.append(klass)
+ klass_set = set(klass_array)
+ klass_list = [("'" + e + "'") for e in klass_set]
+ updated_yaml_string = "[" + ",".join(klass_list) + "]"
+ except Exception:
+ klass_list = [("'" + e + "'") for e in list_classes]
+ updated_yaml_string = "[" + ",".join(klass_list) + "]"
+ return updated_yaml_string
[48/50] [abbrv] ambari git commit: AMBARI-21830 PowerPC - IOP/HDP
migration, service restart fails post Ambari upgrade (dili)
Posted by ja...@apache.org.
AMBARI-21830 PowerPC - IOP/HDP migration, service restart fails post Ambari upgrade (dili)
(cherry picked from commit 909902cdf6496ae5863d76b169da16e2e5d7d55f)
Change-Id: Iec141925bc6d2bbbeec661282d522c47048c8eac
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/99269b1d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/99269b1d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/99269b1d
Branch: refs/heads/2.5-maint
Commit: 99269b1d32ea2391c608cc2f85a1648993fd54a3
Parents: e07e92d
Author: Di Li <di...@apache.org>
Authored: Mon Aug 28 12:16:14 2017 -0400
Committer: dili@apache.org <je...@hortonworks.com>
Committed: Mon Aug 28 16:26:17 2017 +0000
----------------------------------------------------------------------
.../stacks/BigInsights/4.2.5/repos/repoinfo.xml | 12 ++++++++++++
1 file changed, 12 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/99269b1d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml
index d1b155d..b723f09 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml
@@ -41,4 +41,16 @@
<reponame>IOP-UTILS</reponame>
</repo>
</os>
+ <os family="redhat-ppc7">
+ <repo>
+ <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/ppc64le/4.2.x/Updates/4.2.5.0</baseurl>
+ <repoid>IOP-4.2.5</repoid>
+ <reponame>IOP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/ppc64le/1.3</baseurl>
+ <repoid>IOP-UTILS-1.3</repoid>
+ <reponame>IOP-UTILS</reponame>
+ </repo>
+ </os>
</reposinfo>
[15/50] [abbrv] ambari git commit: AMBARI-21729 Owner of yarn.include
stays to be root:root where as dfs.include gets changed to the correct one
upon restart services (dsen)
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_manage.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_manage.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_manage.json
new file mode 100644
index 0000000..e92169c
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_manage.json
@@ -0,0 +1,1078 @@
+{
+ "roleCommand": "INSTALL",
+ "clusterName": "c1",
+ "hostname": "c6401.ambari.apache.org",
+ "hostLevelParams": {
+ "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+ "agent_stack_retry_count": "5",
+ "agent_stack_retry_on_unavailability": "false",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "ambari_db_rca_password": "mapred",
+ "current_version" : "2.2.4.2-1234",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0.8\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+ "package_list": "[{\"type\":\"rpm\",\"name\":\"hadoop-yarn\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-nodemanager\"},{\"type\":\"rpm\",\"name\":\"hadoop-mapreduce\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-proxyserver\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-resourcemanager\"}]",
+ "stack_version": "2.0",
+ "stack_name": "HDP",
+ "db_name": "ambari",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+ "ambari_db_rca_username": "mapred",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "java_version": "8",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+ "custom_mysql_jdbc_name" : "mysql-connector-java.jar",
+ "custom_oracle_jdbc_name" : "oracle-jdbc-driver.jar",
+ "custom_postgres_jdbc_name" : "test-postgres-jdbc.jar"
+ },
+ "commandType": "EXECUTION_COMMAND",
+ "roleParams": {},
+ "serviceName": "HIVE",
+ "role": "HIVE_SERVER",
+ "commandParams": {
+ "command_timeout": "600",
+ "service_package_folder": "YARN",
+ "script_type": "PYTHON",
+ "script": "scripts/yarn_client.py",
+ "excluded_hosts": "host1",
+ "mark_draining_only" : "false",
+ "update_files_only" : "false"
+ },
+ "taskId": 186,
+ "public_hostname": "c6401.ambari.apache.org",
+ "configurations": {
+ "sqoop-site": {
+ "atlas.cluster.name": "c1",
+ "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
+ },
+ "mapred-site": {
+ "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020",
+ "mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+ "mapreduce.reduce.input.buffer.percent": "0.0",
+ "mapreduce.output.fileoutputformat.compress": "false",
+ "mapreduce.framework.name": "yarn",
+ "mapreduce.map.speculative": "false",
+ "mapreduce.reduce.shuffle.merge.percent": "0.66",
+ "yarn.app.mapreduce.am.resource.mb": "683",
+ "mapreduce.map.java.opts": "-Xmx273m",
+ "mapreduce.cluster.administrators": " hadoop",
+ "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+ "mapreduce.job.reduce.slowstart.completedmaps": "0.05",
+ "mapreduce.output.fileoutputformat.compress.type": "BLOCK",
+ "mapreduce.reduce.speculative": "false",
+ "mapreduce.reduce.java.opts": "-Xmx546m",
+ "mapreduce.am.max-attempts": "2",
+ "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.reduce.log.level": "INFO",
+ "mapreduce.map.sort.spill.percent": "0.7",
+ "mapreduce.task.io.sort.mb": "136",
+ "mapreduce.task.timeout": "300000",
+ "mapreduce.map.memory.mb": "341",
+ "mapreduce.task.io.sort.factor": "100",
+ "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+ "mapreduce.reduce.memory.mb": "683",
+ "mapreduce.jobhistory.principal": "jhs/_HOST@EXAMPLE.COM",
+ "yarn.app.mapreduce.am.log.level": "INFO",
+ "mapreduce.map.log.level": "INFO",
+ "mapreduce.shuffle.port": "13562",
+ "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+ "mapreduce.map.output.compress": "false",
+ "yarn.app.mapreduce.am.staging-dir": "/user",
+ "mapreduce.reduce.shuffle.parallelcopies": "30",
+ "mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
+ "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888",
+ "mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab",
+ "mapreduce.jobhistory.done-dir": "/mr-history/done",
+ "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+ "yarn.app.mapreduce.am.command-opts": "-Xmx546m",
+ "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+ },
+ "oozie-site": {
+ "oozie.service.PurgeService.purge.interval": "3600",
+ "oozie.service.CallableQueueService.queue.size": "1000",
+ "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd",
+ "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+ "oozie.service.HadoopAccessorService.nameNode.whitelist": " ",
+ "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver",
+ "local.realm": "EXAMPLE.COM",
+ "use.system.libpath.for.mapreduce.and.pig.jobs": "false",
+ "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
+ "oozie.db.schema.name": "oozie",
+ "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
+ "oozie.service.JPAService.create.db.schema": "false",
+ "oozie.authentication.kerberos.name.rules": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nDEFAULT",
+ "oozie.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "oozie.service.ActionService.executor.ext.classes": "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor",
+ "oozie.service.HadoopAccessorService.kerberos.principal": "oozie/c6402.ambari.apache.org@EXAMPLE.COM",
+ "oozie.service.AuthorizationService.authorization.enabled": "true",
+ "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie",
+ "oozie.service.JPAService.jdbc.password": "asd",
+ "oozie.service.coord.normal.default.timeout": "120",
+ "oozie.service.AuthorizationService.security.enabled": "true",
+ "oozie.service.JPAService.pool.max.active.conn": "10",
+ "oozie.service.PurgeService.older.than": "30",
+ "oozie.service.coord.push.check.requeue.interval": "30000",
+ "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf",
+ "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ",
+ "oozie.service.CallableQueueService.callable.concurrency": "3",
+ "oozie.service.JPAService.jdbc.username": "oozie",
+ "oozie.service.CallableQueueService.threads": "10",
+ "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
+ "oozie.systemmode": "NORMAL",
+ "oozie.service.HadoopAccessorService.keytab.file": "/etc/security/keytabs/oozie.service.keytab",
+ "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib",
+ "oozie.services": "\n org.apache.oozie.service.SchedulerService,\n org.apache.oozie.service.InstrumentationService,\n org.apache.oozie.service.CallableQueueService,\n org.apache.oozie.service.UUIDService,\n org.apache.oozie.service.ELService,\n org.apache.oozie.service.AuthorizationService,\n org.apache.oozie.service.UserGroupInformationService,\n org.apache.oozie.service.HadoopAccessorService,\n org.apache.oozie.service.URIHandlerService,\n org.apache.oozie.service.MemoryLocksService,\n org.apache.oozie.service.DagXLogInfoService,\n org.apache.oozie.service.SchemaService,\n org.apache.oozie.service.LiteWorkflowAppService,\n org.apache.oozie.service.JPAService,\n org.apache.oozie.service.StoreService,\n org.apache.oozie.service.CoordinatorStoreService,\n org.apache.oozie.service.SLAStoreService,\n org.apache.oozie.service.DBLiteWorkflowStoreService,\n
org.apache.oozie.service.CallbackService,\n org.apache.oozie.service.ActionService,\n org.apache.oozie.service.ActionCheckerService,\n org.apache.oozie.service.RecoveryService,\n org.apache.oozie.service.PurgeService,\n org.apache.oozie.service.CoordinatorEngineService,\n org.apache.oozie.service.BundleEngineService,\n org.apache.oozie.service.DagEngineService,\n org.apache.oozie.service.CoordMaterializeTriggerService,\n org.apache.oozie.service.StatusTransitService,\n org.apache.oozie.service.PauseTransitService,\n org.apache.oozie.service.GroupsService,\n org.apache.oozie.service.ProxyUserService",
+ "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
+ "oozie.authentication.type": "kerberos",
+ "oozie.authentication.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM",
+ "oozie.system.id": "oozie-${user.name}",
+ "oozie.https.port": "11443",
+ "oozie.https.keystore.file": "/home/oozie/oozie.keystore",
+ "oozie.https.keystore.pass": "password"
+ },
+ "storm-site": {
+ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+ "topology.workers": "1",
+ "drpc.worker.threads": "64",
+ "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']",
+ "supervisor.heartbeat.frequency.secs": "5",
+ "topology.executor.send.buffer.size": "1024",
+ "drpc.childopts": "-Xmx768m",
+ "nimbus.thrift.port": "6627",
+ "storm.zookeeper.retry.intervalceiling.millis": "30000",
+ "storm.local.dir": "/hadoop/storm",
+ "topology.receiver.buffer.size": "8",
+ "storm.messaging.netty.client_worker_threads": "1",
+ "transactional.zookeeper.root": "/transactional",
+ "drpc.request.timeout.secs": "600",
+ "topology.skip.missing.kryo.registrations": "false",
+ "worker.heartbeat.frequency.secs": "1",
+ "zmq.hwm": "0",
+ "storm.zookeeper.connection.timeout": "15000",
+ "topology.max.error.report.per.interval": "5",
+ "storm.messaging.netty.server_worker_threads": "1",
+ "supervisor.worker.start.timeout.secs": "120",
+ "zmq.threads": "1",
+ "topology.acker.executors": "null",
+ "storm.local.mode.zmq": "false",
+ "topology.max.task.parallelism": "null",
+ "storm.zookeeper.port": "2181",
+ "nimbus.childopts": "-Xmx1024m",
+ "worker.childopts": "-Xmx768m",
+ "drpc.queue.size": "128",
+ "storm.zookeeper.retry.times": "5",
+ "nimbus.monitor.freq.secs": "10",
+ "storm.cluster.mode": "distributed",
+ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+ "drpc.invocations.port": "3773",
+ "storm.zookeeper.root": "/storm",
+ "logviewer.childopts": "-Xmx128m",
+ "transactional.zookeeper.port": "null",
+ "topology.worker.childopts": "null",
+ "topology.max.spout.pending": "null",
+ "nimbus.cleanup.inbox.freq.secs": "600",
+ "storm.messaging.netty.min_wait_ms": "100",
+ "nimbus.task.timeout.secs": "30",
+ "nimbus.thrift.max_buffer_size": "1048576",
+ "topology.sleep.spout.wait.strategy.time.ms": "1",
+ "topology.optimize": "true",
+ "nimbus.reassign": "true",
+ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+ "logviewer.appender.name": "A1",
+ "nimbus.host": "c6401.ambari.apache.org",
+ "ui.port": "8744",
+ "supervisor.slots.ports": "[6700, 6701]",
+ "nimbus.file.copy.expiration.secs": "600",
+ "supervisor.monitor.frequency.secs": "3",
+ "ui.childopts": "-Xmx768m",
+ "transactional.zookeeper.servers": "null",
+ "zmq.linger.millis": "5000",
+ "topology.error.throttle.interval.secs": "10",
+ "topology.worker.shared.thread.pool.size": "4",
+ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+ "task.heartbeat.frequency.secs": "3",
+ "topology.transfer.buffer.size": "1024",
+ "storm.zookeeper.session.timeout": "20000",
+ "topology.executor.receive.buffer.size": "1024",
+ "topology.stats.sample.rate": "0.05",
+ "topology.fall.back.on.java.serialization": "true",
+ "supervisor.childopts": "-Xmx256m",
+ "topology.enable.message.timeouts": "true",
+ "storm.messaging.netty.max_wait_ms": "1000",
+ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+ "nimbus.supervisor.timeout.secs": "60",
+ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+ "nimbus.inbox.jar.expiration.secs": "3600",
+ "drpc.port": "3772",
+ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+ "storm.zookeeper.retry.interval": "1000",
+ "storm.messaging.netty.max_retries": "30",
+ "topology.tick.tuple.freq.secs": "null",
+ "supervisor.enable": "true",
+ "nimbus.task.launch.secs": "120",
+ "task.refresh.poll.secs": "10",
+ "topology.message.timeout.secs": "30",
+ "storm.messaging.netty.buffer_size": "5242880",
+ "topology.state.synchronization.timeout.secs": "60",
+ "supervisor.worker.timeout.secs": "30",
+ "topology.trident.batch.emit.interval.millis": "500",
+ "topology.builtin.metrics.bucket.size.secs": "60",
+ "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin",
+ "logviewer.port": "8000",
+ "topology.debug": "false"
+ },
+ "webhcat-site": {
+ "templeton.pig.path": "pig.tar.gz/pig/bin/pig",
+ "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/c6402.ambari.apache.org@EXAMPLE.COM",
+ "templeton.override.enabled": "false",
+ "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+ "templeton.kerberos.secret": "secret",
+ "templeton.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM",
+ "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181",
+ "templeton.exec.timeout": "60000",
+ "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+ "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz",
+ "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar",
+ "templeton.port": "50111",
+ "templeton.hadoop.conf.dir": "/etc/hadoop/conf",
+ "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar",
+ "templeton.hadoop": "/usr/bin/hadoop",
+ "templeton.hive.path": "hive.tar.gz/hive/bin/hive",
+ "templeton.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "templeton.hcat": "/usr/bin/hcat",
+ "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz",
+ "templeton.sqoop.archive": "hdfs:///apps/webhcat/sqoop.tar.gz"
+ },
+ "capacity-scheduler": {
+ "yarn.scheduler.capacity.node-locality-delay": "40",
+ "yarn.scheduler.capacity.root.capacity": "100",
+ "yarn.scheduler.capacity.root.acl_administer_queue": "*",
+ "yarn.scheduler.capacity.root.queues": "default",
+ "yarn.scheduler.capacity.maximum-applications": "10000",
+ "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+ "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+ "yarn.scheduler.capacity.root.default.state": "RUNNING",
+ "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+ "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+ "yarn.scheduler.capacity.root.default.capacity": "100",
+ "yarn.scheduler.capacity.root.default.acl_submit_applications": "*"
+ },
+ "hdfs-site": {
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+ "dfs.block.access.token.enable": "true",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
+ "ambari.dfs.datanode.http.port": "1022",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1.0f",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.permissions.enabled": "true",
+ "fs.checkpoint.size": "67108864",
+ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+ "dfs.blocksize": "134217728",
+ "dfs.datanode.max.transfer.threads": "1024",
+ "dfs.heartbeat.interval": "3",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "100",
+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
+ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "ambari.dfs.datanode.port": "1019",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
+ "dfs.datanode.http.address": "0.0.0.0:1022",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.datanode.du.reserved": "1073741824",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600"
+ },
+ "hbase-site": {
+ "hbase.hstore.flush.retries.number": "120",
+ "hbase.client.keyvalue.maxsize": "10485760",
+ "hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
+ "phoenix.queryserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
+ "hbase.hstore.compactionThreshold": "3",
+ "hbase.bulkload.staging.dir": "/apps/hbase/staging",
+ "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data",
+ "hbase.regionserver.handler.count": "60",
+ "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+ "hbase.master.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
+ "hbase.hregion.memstore.block.multiplier": "2",
+ "hbase.hregion.memstore.flush.size": "134217728",
+ "hbase.superuser": "hbase",
+ "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController",
+ "hbase.zookeeper.property.clientPort": "2181",
+ "hbase.hregion.max.filesize": "10737418240",
+ "hbase.regionserver.global.memstore.upperLimit": "0.4",
+ "zookeeper.session.timeout": "30000",
+ "hbase.tmp.dir": "/hadoop/hbase",
+ "hbase.local.dir": "${hbase.tmp.dir}/local",
+ "hfile.block.cache.size": "0.40",
+ "hbase.regionserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
+ "phoenix.queryserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
+ "hbase.security.authentication": "kerberos",
+ "hbase.defaults.for.version.skip": "true",
+ "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org",
+ "zookeeper.znode.parent": "/hbase-secure",
+ "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
+ "hbase.hstore.blockingStoreFiles": "10",
+ "hbase.master.port": "60000",
+ "hbase.hregion.majorcompaction": "86400000",
+ "hbase.security.authorization": "true",
+ "hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
+ "hbase.cluster.distributed": "true",
+ "hbase.hregion.memstore.mslab.enabled": "true",
+ "hbase.client.scanner.caching": "100",
+ "hbase.zookeeper.useMulti": "true"
+ },
+ "core-site": {
+ "gluster.daemon.user": "null",
+ "hadoop.security.authorization": "true",
+ "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org",
+ "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org",
+ "hadoop.proxyuser.hcat.groups": "users",
+ "fs.trash.interval": "360",
+ "hadoop.proxyuser.hive.groups": "users",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+ "hadoop.proxyuser.HTTP.hosts": "c6402.ambari.apache.org",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "hadoop.security.authentication": "kerberos",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.proxyuser.HTTP.groups": "users",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "hadoop.proxyuser.oozie.groups": "users",
+ "fs.AbstractFileSystem.glusterfs.impl": "null",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT",
+ "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org",
+ "ipc.client.connection.maxidletime": "30000"
+ },
+ "ranger-hdfs-plugin-properties" : {
+ "ranger-hdfs-plugin-enabled":"No"
+ },
+ "ranger-hive-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "ranger-hive-plugin-enabled": "No",
+ "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
+ "REPOSITORY_CONFIG_USERNAME": "hive",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hive",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-knox-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "KNOX_HOME": "/usr/hdp/current/knox-server",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "ranger-knox-plugin-enabled": "No",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "admin",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "admin-password",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "hive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "true",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.server2.authentication": "KERBEROS",
+ "hive.metastore.sasl.enabled": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.server2.transport.mode": "binary",
+ "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hive-interactive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "true",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9084",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.server2.authentication": "KERBEROS",
+ "hive.metastore.sasl.enabled": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.server2.transport.mode": "binary",
+ "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hiveserver2-site": {
+ "hive.metastore.metrics.enabled": "true",
+ "hive.service.metrics.reporter": "HADOOP2"
+ },
+ "yarn-site": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+ "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
+ "yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM",
+ "yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+ "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025",
+ "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+ "yarn.resourcemanager.hostname": "c6402.ambari.apache.org",
+ "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+ "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+ "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
+ "yarn.resourcemanager.nodes.include-path": "/etc/hadoop/conf_for_include/yarn.include",
+ "yarn.scheduler.minimum-allocation-mb": "683",
+ "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050",
+ "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030",
+ "yarn.log-aggregation.retain-seconds": "2592000",
+ "yarn.scheduler.maximum-allocation-mb": "2048",
+ "yarn.log-aggregation-enable": "true",
+ "yarn.nodemanager.address": "0.0.0.0:45454",
+ "yarn.nodemanager.container-monitor.interval-ms": "3000",
+ "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+ "yarn.nodemanager.log-aggregation.compression-type": "gz",
+ "yarn.nodemanager.log.retain-seconds": "604800",
+ "yarn.nodemanager.delete.debug-delay-sec": "0",
+ "yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log",
+ "yarn.nodemanager.health-checker.interval-ms": "135000",
+ "yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+ "yarn.resourcemanager.am.max-attempts": "2",
+ "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+ "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+ "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+ "yarn.nodemanager.vmem-check-enabled": "false",
+ "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+ "yarn.admin.acl": "*",
+ "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088",
+ "yarn.nodemanager.resource.memory-mb": "2048",
+ "yarn.nodemanager.linux-container-executor.group": "hadoop",
+ "yarn.acl.enable": "true",
+ "manage.include.files": "true",
+ "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs",
+ "yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+ "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+ "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141",
+ "yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+ "yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab",
+ "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
+ "yarn.timeline-service.principal": "yarn/_HOST@EXAMPLE.COM",
+ "yarn.timeline-service.keytab" : "/etc/security/keytabs/yarn.service.keytab",
+ "yarn.http.policy": "HTTP_ONLY",
+ "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090"
+ },
+ "ranger-yarn-plugin-properties" : {
+ "ranger-yarn-plugin-enabled":"No"
+ },
+ "yarn-env": {
+ "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+ "apptimelineserver_heapsize": "1024",
+ "nodemanager_heapsize": "1024",
+ "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n #echo \"run java in $JAVA_HOME\"\n JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n echo \"Error: JAVA_HOME is not set.\"\n exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n JAVA_HEAP_MAX=\"-
Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to specif
y an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be appen
ded to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${YARN_
ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
+ "yarn_heapsize": "1024",
+ "yarn_user": "yarn",
+ "resourcemanager_heapsize": "1024",
+ "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+ "min_user_id": "1000",
+ "is_supported_yarn_ranger": "false",
+ "service_check.queue.name": "default"
+ },
+ "cluster-env": {
+ "managed_hdfs_resource_property_names": "",
+ "security_enabled": "true",
+ "ignore_groupsusers_create": "false",
+ "smokeuser": "ambari-qa",
+ "smokeuser_principal_name": "ambari-qa@EXAMPLE.COM",
+ "kerberos_domain": "EXAMPLE.COM",
+ "user_group": "hadoop",
+ "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+ "kinit_path_local": "/usr/bin",
+ "manage_dirs_on_root": "true",
+ "ignore_bad_mounts": "false"
+ },
+ "hadoop-env": {
+ "hdfs_tmp_dir": "/tmp",
+ "namenode_opt_maxnewsize": "200m",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "200m",
+ "namenode_opt_permsize" : "128m",
+ "namenode_opt_maxpermsize" : "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nex
port HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USE
R/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/
gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SE
CURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The s
cheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "hdfs_user": "hdfs",
+ "hdfs_principal_name": "hdfs",
+ "dtnode_heapsize": "1024m",
+ "proxyuser_group": "users",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
+ },
+ "hive-env": {
+ "hcat_pid_dir": "/var/run/webhcat",
+ "hcat_user": "hcat",
+ "hive_ambari_database": "MySQL",
+ "hive_hostname": "abtest-3.c.pramod-thangali.internal",
+ "hive_metastore_port": "9083",
+ "webhcat_user": "hcat",
+ "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can
be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}",
+ "hive_database_name": "hive",
+ "hive_database_type": "mysql",
+ "hive_pid_dir": "/var/run/hive",
+ "hive_log_dir": "/var/log/hive",
+ "hive_user": "hive",
+ "hcat_log_dir": "/var/log/webhcat",
+ "hive_database": "New MySQL Database",
+ "hive_security_authorization": "None"
+ },
+ "hbase-env": {
+ "hbase_pid_dir": "/var/run/hbase",
+ "hbase_user": "hbase",
+ "hbase_master_heapsize": "1024m",
+ "hbase_principal_name": "hbase",
+ "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateS
tamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra
ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBAS
E_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}",
+ "hbase_regionserver_heapsize": "1024m",
+ "hbase_regionserver_xmn_max": "512",
+ "hbase_regionserver_xmn_ratio": "0.2",
+ "hbase_log_dir": "/var/log/hbase",
+ "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab",
+ "hbase_java_io_tmpdir" : "/tmp",
+ "hbase_regionserver_shutdown_timeout": "30"
+ },
+ "flume-env": {
+ "content": "export JAVA_HOME={{java64_home}}",
+ "flume_user": "flume"
+ },
+ "ganglia-env": {
+ "gmond_user": "nobody",
+ "ganglia_runtime_dir": "/var/run/ganglia/hdp",
+ "rrdcached_base_dir": "/var/lib/ganglia/rrds",
+ "rrdcached_flush_timeout": "7200",
+ "gmetad_user": "nobody",
+ "rrdcached_write_threads": "4",
+ "rrdcached_delay": "1800",
+ "rrdcached_timeout": "3600"
+ },
+ "zookeeper-env": {
+ "zk_user": "zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "zk_pid_dir": "/var/run/zookeeper",
+ "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM",
+ "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab"
+ },
+ "zoo.cfg": {
+ "clientPort": "2181",
+ "syncLimit": "5",
+ "initLimit": "10",
+ "dataDir": "/hadoop/zookeeper",
+ "tickTime": "2000"
+ },
+ "mapred-env": {
+ "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.",
+ "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
+ "mapred_user": "mapred",
+ "jobhistory_heapsize": "900",
+ "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+ },
+ "oozie-env": {
+ "oozie_derby_database": "Derby",
+ "oozie_admin_port": "11001",
+ "oozie_hostname": "abtest-3.c.pramod-thangali.internal",
+ "oozie_pid_dir": "/var/run/oozie",
+ "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuratio
n directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "oozie_user": "oozie",
+ "oozie_admin_users": "{oozie_user}, {oozie_user}-admin",
+ "oozie_database": "New Derby Database",
+ "oozie_data_dir": "/hadoop/oozie/data",
+ "oozie_log_dir": "/var/log/oozie",
+ "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab",
+ "oozie_tmp_dir": "/var/tmp/oozie"
+ },
+ "ranger-env": {
+ "xml_configurations_supported" : "false"
+ },
+
+ "webhcat-env": {
+ "content": "\n# The file containing the running pid\nPID_FILE={{pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=/usr/lib/hadoop"
+ },
+ "hcat-env": {
+ "content": "JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}"
+ },
+ "pig-env": {
+ "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+ },
+ "sqoop-env": {
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
+ "sqoop_user": "sqoop"
+ },
+ "hdfs-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "yarn-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hbase-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-exec-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "webhcat-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "zookeeper-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-properties": {
+ "content": "pigproperties\nline2"
+ },
+ "oozie-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "ranger-hbase-plugin-properties": {
+ "POLICY_MGR_URL": "{{policymgr_mgr_url}}",
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "ranger-hbase-plugin-enabled": "No",
+ "REPOSITORY_NAME": "{{repo_name}}",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}",
+ "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.DB.PASSWORD": "{{xa_audit_db_password}}",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hbase",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.DB.FLAVOUR": "{{xa_audit_db_flavor}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hbase",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-hdfs-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "hadoop.rpc.protection": "-",
+ "ranger-hdfs-plugin-enabled": "No",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hadoop",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "hadoop-metrics2.properties": {
+ "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\r\n# contributor license agreements. See the NOTICE file distributed with\r\n# this work for additional information regarding copyright ownership.\r\n# The ASF licenses this file to You under the Apache License, Version 2.0\r\n# (the \"License\"); you may not use this file except in compliance with\r\n# the License. You may obtain a copy of the License at\r\n#\r\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# syntax: [prefix].[source|sink|jmx].[instance].[options]\r\n# See package.html for org.apache.hadoop.metrics2 for details\r\n\r\n{% if has_ganglia_se
rver %}\r\n*.period=60\r\n\r\n*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31\r\n*.sink.ganglia.period=10\r\n\r\n# default for supportsparse is false\r\n*.sink.ganglia.supportsparse=true\r\n\r\n.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both\r\n.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40\r\n\r\n# Hook up to the server\r\nnamenode.sink.ganglia.servers={{ganglia_server_host}}:8661\r\ndatanode.sink.ganglia.servers={{ganglia_server_host}}:8659\r\njobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662\r\ntasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658\r\nmaptask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nreducetask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nresourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664\r\nnodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657\r\nhistoryserver.sink.ganglia.servers={{ganglia_server_host}}:8666\r\njournaln
ode.sink.ganglia.servers={{ganglia_server_host}}:8654\r\nnimbus.sink.ganglia.servers={{ganglia_server_host}}:8649\r\nsupervisor.sink.ganglia.servers={{ganglia_server_host}}:8650\r\n\r\nresourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue\r\n\r\n{% endif %}\r\n\r\n{% if has_metric_collector %}\r\n\r\n*.period={{metrics_collection_period}}\r\n*.sink.timeline.plugin.urls=file:\/\/\/usr\/lib\/ambari-metrics-hadoop-sink\/ambari-metrics-hadoop-sink.jar\r\n*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink\r\n*.sink.timeline.period={{metrics_collection_period}}\r\n*.sink.timeline.sendInterval={{metrics_report_interval}}000\r\n*.sink.timeline.slave.host.name={{hostname}}\r\n*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}\r\n*.sink.timeline.protocol={{metric_collector_protocol}}\r\n*.sink.timeline.port={{metric_collector_port}}\r\n\r\n# HTTPS properties\r\n*.sink.timeline.truststore.path = {{metric_truststore_path}}\r\n*.sink.timeline.truststore.t
ype = {{metric_truststore_type}}\r\n*.sink.timeline.truststore.password = {{metric_truststore_password}}\r\n\r\ndatanode.sink.timeline.collector={{metric_collector_hosts}}\r\nnamenode.sink.timeline.collector={{metric_collector_hosts}}\r\nresourcemanager.sink.timeline.collector={{metric_collector_hosts}}\r\nnodemanager.sink.timeline.collector={{metric_collector_hosts}}\r\njobhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\njournalnode.sink.timeline.collector={{metric_collector_hosts}}\r\nmaptask.sink.timeline.collector={{metric_collector_hosts}}\r\nreducetask.sink.timeline.collector={{metric_collector_hosts}}\r\napplicationhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\n\r\nresourcemanager.sink.timeline.tagsForPrefix.yarn=Queue\r\n\r\n{% if is_nn_client_port_configured %}\r\n# Namenode rpc ports customization\r\nnamenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}\r\n{% endif %}\r\n{% if is_nn_dn_port_configured %}\r\nnamenode.si
nk.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}\r\n{% endif %}\r\n{% if is_nn_healthcheck_port_configured %}\r\nnamenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}\r\n{% endif %}\r\n\r\n{% endif %}"
+ }
+ },
+ "configuration_attributes": {
+ "sqoop-site": {},
+ "yarn-site": {
+ "final": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
+ "yarn.nodemanager.container-executor.class": "true",
+ "yarn.nodemanager.local-dirs": "true"
+ }
+ },
+ "tez-site": {
+ "final": {
+ "tez.am.log.level": "true"
+ }
+ },
+ "capacity-scheduler": {
+ "final": {
+ "yarn.scheduler.capacity.node-locality-delay": "true"
+ }
+ },
+ "mapred-site": {
+ "final": {
+ "mapred.healthChecker.script.path": "true",
+ "mapreduce.jobtracker.staging.root.dir": "true"
+ }
+ },
+ "oozie-site": {
+ "final": {
+ "oozie.service.PurgeService.purge.interval": "true",
+ "oozie.service.CallableQueueService.queue.size": "true"
+ }
+ },
+ "webhcat-site": {
+ "final": {
+ "templeton.pig.path": "true",
+ "templeton.exec.timeout": "true",
+ "templeton.override.enabled": "true"
+ }
+ },
+ "hdfs-site": {
+ "final": {
+ "dfs.web.ugi": "true",
+ "dfs.support.append": "true",
+ "dfs.cluster.administrators": "true"
+ }
+ },
+ "hbase-site": {
+ "final": {
+ "hbase.client.keyvalue.maxsize": "true",
+ "hbase.hstore.compactionThreshold": "true",
+ "hbase.rootdir": "true"
+ }
+ },
+ "core-site": {
+ "final": {
+ "hadoop.proxyuser.hive.groups": "true",
+ "webinterface.private.actions": "true",
+ "hadoop.proxyuser.oozie.hosts": "true"
+ }
+ },
+ "hive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hiveserver2-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hive-interactive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ }
+ },
+ "configurationTags": {
+ "capacity-scheduler": {
+ "tag": "version1"
+ },
+ "oozie-site": {
+ "tag": "version1389972945450"
+ },
+ "storm-site": {
+ "tag": "version1"
+ },
+ "webhcat-site": {
+ "tag": "version1389972945450"
+ },
+ "global": {
+ "tag": "version1389972945449"
+ },
+ "mapred-site": {
+ "tag": "version1389972945450"
+ },
+ "hdfs-site": {
+ "tag": "version1389972945450"
+ },
+ "hbase-site": {
+ "tag": "version1389972945449"
+ },
+ "core-site": {
+ "tag": "version1389972945450"
+ },
+ "yarn-site": {
+ "tag": "version1389972945449"
+ },
+ "hive-site": {
+ "tag": "version1389972945450"
+ },
+ "hiveserver2-site": {
+ "tag": "version1389972945450"
+ },
+ "hive-interactive-site": {
+ "tag": "version1389972945450"
+ },
+ "hdfs-log4j": {
+ "tag": "version1389972945450"
+ },
+ "yarn-log4j": {
+ "tag": "version1389972945450"
+ },
+ "hbase-log4j": {
+ "tag": "version1389972945450"
+ },
+ "hive-log4j": {
+ "tag": "version1389972945450"
+ },
+ "hive-exec-log4j": {
+ "tag": "version1389972945450"
+ },
+ "zookeeper-log4j": {
+ "tag": "version1389972945450"
+ },
+ "oozie-log4j": {
+ "tag": "version1389972945450"
+ },
+ "pig-log4j": {
+ "tag": "version1389972945450"
+ }
+ },
+ "commandId": "9-1",
+ "clusterHostInfo": {
+ "ambari_server_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "snamenode_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nm_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "drpc_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "slave_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "logviewer_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_metastore_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "hbase_rs_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "webhcat_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "zookeeper_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "supervisor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_monitor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_ping_ports": [
+ "8670",
+ "8670"
+ ],
+ "rm_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "all_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_racks": [
+ "/default/rack_01",
+ "/default/rack_02"
+ ],
+ "all_ipv4_ips": [
+ "127.0.0.1",
+ "127.0.0.1"
+ ],
+ "storm_ui_server_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "oozie_server": [
+ "c6402.ambari.apache.org"
+ ],
+ "hs_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nimbus_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "namenode_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "hbase_master_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_mysql_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "falcon_server_hosts": [
+ "c6402.ambari.apache.org"
+ ]
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
index da54b7c..7db9ea9 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
@@ -163,9 +163,6 @@ rm_hosts = config['clusterHostInfo']['rm_host']
rm_host = rm_hosts[0]
rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
-# TODO UPGRADE default, update site during upgrade
-rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
java64_home = config['hostLevelParams']['java_home']
hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
@@ -227,6 +224,7 @@ user_group = config['configurations']['cluster-env']['user_group']
#exclude file
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+rm_nodes_exclude_dir = os.path.dirname(exclude_file_path)
ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
has_ats = not len(ats_host) == 0
@@ -238,6 +236,7 @@ include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.node
include_hosts = None
manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
if include_file_path and manage_include_files:
+ rm_nodes_include_dir = os.path.dirname(include_file_path)
include_hosts = list(set(nm_hosts) - set(exclude_hosts))
# don't using len(nm_hosts) here, because check can take too much time on large clusters
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn.py
index f5acb11..147fb38 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn.py
@@ -289,10 +289,22 @@ def yarn(name=None, config_dir=None):
create_parents=True,
cd_access='a',
)
- File(params.rm_nodes_exclude_path,
+ File(params.exclude_file_path,
+ content=Template("exclude_hosts_list.j2"),
owner=params.yarn_user,
group=params.user_group
)
+ if params.include_hosts:
+ Directory(params.rm_nodes_include_dir,
+ mode=0755,
+ create_parents=True,
+ cd_access='a',
+ )
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.yarn_user,
+ group=params.user_group
+ )
File(params.yarn_job_summary_log,
owner=params.yarn_user,
group=params.user_group
@@ -496,4 +508,4 @@ def yarn(name=None, config_dir=None):
File(os.path.join(config_dir, 'ssl-server.xml.example'),
owner=params.mapred_user,
group=params.user_group
- )
\ No newline at end of file
+ )
[06/50] [abbrv] ambari git commit: AMBARI-21690 Updating
get_stack_version pattern match (mugdha)
Posted by ja...@apache.org.
AMBARI-21690 Updating get_stack_version pattern match (mugdha)
(cherry picked from commit ee87b6ab60edc43ef94a404ae2ab72da42f3549f)
Change-Id: If45e425a0b3e978842c43b1b18b4a4c7ac35c02e
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/591b4067
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/591b4067
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/591b4067
Branch: refs/heads/2.5-maint
Commit: 591b4067256021dd9a30ac90bc01faff68263fad
Parents: ad88657
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Thu Aug 10 11:57:28 2017 +0530
Committer: Zuul <re...@hortonworks.com>
Committed: Thu Aug 10 22:22:52 2017 -0700
----------------------------------------------------------------------
----------------------------------------------------------------------
[37/50] [abbrv] ambari git commit: AMBARI-21782 - Service Accounts
page shows no data for stack services after Ambari is upgraded on IOP cluster
(jonathanhurley)
Posted by ja...@apache.org.
AMBARI-21782 - Service Accounts page shows no data for stack services after Ambari is upgraded on IOP cluster (jonathanhurley)
Change-Id: Ia45343211b757f7531aff11751c3594e1ffa82eb
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4e453f6b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4e453f6b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4e453f6b
Branch: refs/heads/2.5-maint
Commit: 4e453f6b3e8ff1f0a3ada7e31a626bb7c057ba40
Parents: c014199
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Aug 22 16:47:08 2017 -0400
Committer: Zuul <re...@hortonworks.com>
Committed: Tue Aug 22 16:28:50 2017 -0700
----------------------------------------------------------------------
.../BigInsights/4.0/configuration/cluster-env.xml | 6 +++++-
.../services/AMBARI_METRICS/configuration/ams-env.xml | 4 ++++
.../4.0/services/FLUME/configuration/flume-env.xml | 5 +++++
.../4.0/services/HBASE/configuration/hbase-env.xml | 4 ++++
.../4.0/services/HDFS/configuration/hadoop-env.xml | 8 ++++++++
.../4.0/services/HIVE/configuration/hive-env.xml | 13 +++++++++++++
.../4.0/services/KAFKA/configuration/kafka-env.xml | 4 ++++
.../4.0/services/KNOX/configuration/knox-env.xml | 8 ++++++++
.../4.0/services/OOZIE/configuration/oozie-env.xml | 4 ++++
.../4.0/services/SOLR/configuration/solr-env.xml | 4 ++++
.../4.0/services/SPARK/configuration/spark-env.xml | 8 ++++++++
.../4.0/services/SQOOP/configuration/sqoop-env.xml | 4 ++++
.../services/YARN/configuration-mapred/mapred-env.xml | 4 ++++
.../4.0/services/YARN/configuration/yarn-env.xml | 4 ++++
.../services/ZOOKEEPER/configuration/zookeeper-env.xml | 4 ++++
.../4.2/services/FLUME/configuration/flume-env.xml | 4 ++++
.../4.2/services/HBASE/configuration/hbase-env.xml | 4 ++++
.../4.2/services/HDFS/configuration/hadoop-env.xml | 8 ++++++++
.../4.2/services/HIVE/configuration/hive-env.xml | 12 ++++++++++++
.../4.2/services/KAFKA/configuration/kafka-env.xml | 4 ++++
.../4.2/services/KNOX/configuration/knox-env.xml | 8 ++++++++
.../4.2/services/OOZIE/configuration/oozie-env.xml | 4 ++++
.../4.2/services/RANGER/configuration/ranger-env.xml | 1 +
.../4.2/services/RANGER_KMS/configuration/kms-env.xml | 8 ++++++++
.../4.2/services/SOLR/configuration/solr-env.xml | 4 ++++
.../4.2/services/SPARK/configuration/spark-env.xml | 8 ++++++++
.../4.2/services/SQOOP/configuration/sqoop-env.xml | 4 ++++
.../4.2/services/TITAN/configuration/titan-env.xml | 4 ++++
.../services/YARN/configuration-mapred/mapred-env.xml | 4 ++++
.../4.2/services/YARN/configuration/yarn-env.xml | 4 ++++
.../services/ZOOKEEPER/configuration/zookeeper-env.xml | 4 ++++
31 files changed, 168 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
index c73b6e4..450d997 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
@@ -88,7 +88,11 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>User executing service checks</description>
- </property>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
+ </property>
<property>
<name>smokeuser_keytab</name>
<value>/etc/security/keytabs/smokeuser.headless.keytab</value>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
index 158c68f..a654eec 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
@@ -26,6 +26,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Ambari Metrics User Name.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>metrics_collector_log_dir</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml
index 3b14a58..bde7196 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml
@@ -51,8 +51,13 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Flume User</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
+
<!-- flume-env.sh -->
<property>
<name>content</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml
index d9700ee..655775d 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml
@@ -103,6 +103,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>HBase User Name.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<!-- hbase-env.sh -->
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
index c79ac73..b356ab1 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
@@ -173,6 +173,10 @@
<on-ambari-upgrade add="true"/>
<property-type>GROUP</property-type>
<description>Proxy user group.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>hdfs_user</name>
@@ -180,6 +184,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>User to run HDFS as</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>dfs.datanode.data.dir.mount.file</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
index 666be68..31a186d 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
@@ -109,8 +109,13 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Hive User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
+
<!--HCAT-->
<property>
@@ -143,6 +148,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>HCat User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>webhcat_user</name>
@@ -150,6 +159,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>WebHCat User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<!-- hive-env.sh -->
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml
index 8069f89..60b01dd 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml
@@ -27,6 +27,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description></description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>kafka_log_dir</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml
index a1faa9a..2916ccf 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml
@@ -42,6 +42,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Knox Username.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
@@ -50,6 +54,10 @@
<on-ambari-upgrade add="true"/>
<property-type>GROUP</property-type>
<description>Knox Group.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml
index b72e331..7c8429f 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml
@@ -28,6 +28,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Oozie User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>oozie_admin_users</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml
index 03d4e99..468905b 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml
@@ -7,6 +7,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<value>solr</value>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml
index f000892..cd26f71 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml
@@ -37,6 +37,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Spark User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
@@ -45,6 +49,10 @@
<on-ambari-upgrade add="true"/>
<property-type>GROUP</property-type>
<description>spark group</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml
index d9070c2..e78160b 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml
@@ -52,6 +52,10 @@ export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<value>sqoop</value>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>jdbc_drivers</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml
index 3926fec..bba56fe 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -51,6 +51,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Mapreduce User</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>jobhistory_heapsize</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml
index 956fa42..fc03309 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml
@@ -51,6 +51,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>YARN User</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>yarn_heapsize</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
index b5dca83..a07ced5 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -27,6 +27,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>ZooKeeper User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>zk_log_dir</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/FLUME/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/FLUME/configuration/flume-env.xml
index ed8f8f8..a942ab8 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/FLUME/configuration/flume-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/FLUME/configuration/flume-env.xml
@@ -51,6 +51,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Flume User</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>flume_run_dir</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/configuration/hbase-env.xml
index 64b79d6..ff7dcef 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/configuration/hbase-env.xml
@@ -103,6 +103,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>HBase User Name.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>phoenix_sql_enabled</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/configuration/hadoop-env.xml
index a299e75..21fc028 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/configuration/hadoop-env.xml
@@ -181,6 +181,10 @@
<on-ambari-upgrade add="true"/>
<property-type>GROUP</property-type>
<description>Proxy user group.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>hdfs_user</name>
@@ -189,6 +193,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>User to run HDFS as</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/configuration/hive-env.xml
index 96880ec..b66cb01 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HIVE/configuration/hive-env.xml
@@ -141,6 +141,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Hive User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<!--HCAT-->
@@ -176,6 +180,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>HCat User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>webhcat_user</name>
@@ -184,6 +192,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>WebHCat User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KAFKA/configuration/kafka-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KAFKA/configuration/kafka-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KAFKA/configuration/kafka-env.xml
index cc22c91..599e0f6 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KAFKA/configuration/kafka-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KAFKA/configuration/kafka-env.xml
@@ -28,6 +28,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description></description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>kafka_log_dir</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/configuration/knox-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/configuration/knox-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/configuration/knox-env.xml
index ec15d06..86a27c2 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/configuration/knox-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KNOX/configuration/knox-env.xml
@@ -42,6 +42,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Knox Username.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
@@ -50,6 +54,10 @@
<on-ambari-upgrade add="true"/>
<property-type>GROUP</property-type>
<description>Knox Group.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/configuration/oozie-env.xml
index fbf1d2f..7e6eb2d 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/configuration/oozie-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/configuration/oozie-env.xml
@@ -28,6 +28,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Oozie User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>oozie_admin_users</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-env.xml
index 9d7e385..59688cb 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/configuration/ranger-env.xml
@@ -40,6 +40,7 @@
<property-type>GROUP</property-type>
<description>Ranger group</description>
<value-attributes>
+ <type>user</type>
<overridable>false</overridable>
</value-attributes>
</property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER_KMS/configuration/kms-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER_KMS/configuration/kms-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER_KMS/configuration/kms-env.xml
index 14e29b4..c8f0b8f 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER_KMS/configuration/kms-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER_KMS/configuration/kms-env.xml
@@ -27,6 +27,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Kms username</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
@@ -36,6 +40,10 @@
<on-ambari-upgrade add="true"/>
<property-type>GROUP</property-type>
<description>Kms group</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SOLR/configuration/solr-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SOLR/configuration/solr-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SOLR/configuration/solr-env.xml
index 370cef4..d642dd7 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SOLR/configuration/solr-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SOLR/configuration/solr-env.xml
@@ -7,6 +7,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<value>solr</value>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-env.xml
index 993820d..4eb2ee7 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/configuration/spark-env.xml
@@ -37,6 +37,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Spark User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
@@ -45,6 +49,10 @@
<on-ambari-upgrade add="true"/>
<property-type>GROUP</property-type>
<description>spark group</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SQOOP/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SQOOP/configuration/sqoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SQOOP/configuration/sqoop-env.xml
index 1eed273..42035c7 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SQOOP/configuration/sqoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SQOOP/configuration/sqoop-env.xml
@@ -52,6 +52,10 @@ export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<value>sqoop</value>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>jdbc_drivers</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
index dda05e4..d6c5ecf 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
@@ -27,6 +27,10 @@
<description>User to run Titan as</description>
<property-type>USER</property-type>
<value>titan</value>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
<on-ambari-upgrade add="false"/>
</property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
index 1536ff6..f37e8d9 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
@@ -51,6 +51,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>Mapreduce User</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>jobhistory_heapsize</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
index 55384cc..b7940d1 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
@@ -51,6 +51,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>YARN User</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>yarn_heapsize</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e453f6b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
index 332cd4d..da0012e 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -27,6 +27,10 @@
<on-ambari-upgrade add="true"/>
<property-type>USER</property-type>
<description>ZooKeeper User.</description>
+ <value-attributes>
+ <type>user</type>
+ <overridable>false</overridable>
+ </value-attributes>
</property>
<property>
<name>zk_log_dir</name>
[28/50] [abbrv] ambari git commit: AMBARI-21675. Add Secure Flag to
Cookie / JSESSIONID in Zeppelin (Prabhjyot Singh via Venkata Sairam)
Posted by ja...@apache.org.
AMBARI-21675. Add Secure Flag to Cookie / JSESSIONID in Zeppelin (Prabhjyot Singh via Venkata Sairam)
(cherry picked from commit 2b50a560c34a278b67352b699e698a1cfd050406)
Change-Id: Ie9413cf613616c89301dd955eb8e3255eb9c0b91
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5a57ad34
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5a57ad34
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5a57ad34
Branch: refs/heads/2.5-maint
Commit: 5a57ad34b305c86bcb952ceae8979d6c4713339a
Parents: f146158
Author: Venkata Sairam <ve...@gmail.com>
Authored: Mon Aug 21 14:03:58 2017 +0530
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Mon Aug 21 13:49:56 2017 -0700
----------------------------------------------------------------------
.../ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml | 6 ++++++
1 file changed, 6 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5a57ad34/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
index 971e4af..742f5be 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
@@ -61,6 +61,12 @@ sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager
cacheManager = org.apache.shiro.cache.MemoryConstrainedCacheManager
securityManager.cacheManager = $cacheManager
+cookie = org.apache.shiro.web.servlet.SimpleCookie
+cookie.name = JSESSIONID
+cookie.secure = true
+cookie.httpOnly = true
+sessionManager.sessionIdCookie = $cookie
+
securityManager.sessionManager = $sessionManager
# 86,400,000 milliseconds = 24 hour
securityManager.sessionManager.globalSessionTimeout = 86400000
[25/50] [abbrv] ambari git commit: AMBARI-21753. Unable to add new
host to the upgraded PPC cluster due to exception: Local OS is not compatible
with cluster primary OS family (aonishuk)
Posted by ja...@apache.org.
AMBARI-21753. Unable to add new host to the upgraded PPC cluster due to exception: Local OS is not compatible with cluster primary OS family (aonishuk)
(cherry picked from commit 85801149d811f2e84669c30fba316d074546afaa)
Change-Id: Ie1ff9d5441840bcd2bf458ac143d36d770615d0b
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2030db8c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2030db8c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2030db8c
Branch: refs/heads/2.5-maint
Commit: 2030db8ccc0a78645e9256d786e9719a8a91a3c5
Parents: dcf9a4a
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Aug 18 16:41:05 2017 +0300
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Sat Aug 19 08:09:03 2017 -0700
----------------------------------------------------------------------
.../src/main/python/ambari_server/serverConfiguration.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/2030db8c/ambari-server/src/main/python/ambari_server/serverConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index f6f3f43..24656b9 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -1178,8 +1178,8 @@ def update_ambari_properties():
if NR_USER_PROPERTY not in new_properties.keys():
new_properties.process_pair(NR_USER_PROPERTY, "root")
- if OS_FAMILY_PROPERTY not in new_properties.keys():
- new_properties.process_pair(OS_FAMILY_PROPERTY, OS_FAMILY + OS_VERSION)
+ # update the os. In case os detection routine changed
+ new_properties.process_pair(OS_FAMILY_PROPERTY, OS_FAMILY + OS_VERSION)
with open(conf_file, 'w') as hfW:
new_properties.store(hfW)
[16/50] [abbrv] ambari git commit: AMBARI-21729 Owner of yarn.include
stays to be root:root where as dfs.include gets changed to the correct one
upon restart services (dsen)
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_dont_manage.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_dont_manage.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_dont_manage.json
new file mode 100644
index 0000000..0e0c7e0
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_yarn_include_file_dont_manage.json
@@ -0,0 +1,1078 @@
+{
+ "roleCommand": "INSTALL",
+ "clusterName": "c1",
+ "hostname": "c6401.ambari.apache.org",
+ "hostLevelParams": {
+ "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+ "agent_stack_retry_count": "5",
+ "agent_stack_retry_on_unavailability": "false",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "ambari_db_rca_password": "mapred",
+ "current_version" : "2.2.4.2-1234",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+ "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
+ "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0.8\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+ "package_list": "[{\"type\":\"rpm\",\"name\":\"hadoop-yarn\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-nodemanager\"},{\"type\":\"rpm\",\"name\":\"hadoop-mapreduce\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-proxyserver\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-resourcemanager\"}]",
+ "stack_version": "2.0",
+ "stack_name": "HDP",
+ "db_name": "ambari",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+ "ambari_db_rca_username": "mapred",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "java_version": "8",
+ "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar",
+ "custom_mysql_jdbc_name" : "mysql-connector-java.jar",
+ "custom_oracle_jdbc_name" : "oracle-jdbc-driver.jar",
+ "custom_postgres_jdbc_name" : "test-postgres-jdbc.jar"
+ },
+ "commandType": "EXECUTION_COMMAND",
+ "roleParams": {},
+ "serviceName": "HIVE",
+ "role": "HIVE_SERVER",
+ "commandParams": {
+ "command_timeout": "600",
+ "service_package_folder": "YARN",
+ "script_type": "PYTHON",
+ "script": "scripts/yarn_client.py",
+ "excluded_hosts": "host1",
+ "mark_draining_only" : "false",
+ "update_files_only" : "false"
+ },
+ "taskId": 186,
+ "public_hostname": "c6401.ambari.apache.org",
+ "configurations": {
+ "sqoop-site": {
+ "atlas.cluster.name": "c1",
+ "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
+ },
+ "mapred-site": {
+ "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020",
+ "mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+ "mapreduce.reduce.input.buffer.percent": "0.0",
+ "mapreduce.output.fileoutputformat.compress": "false",
+ "mapreduce.framework.name": "yarn",
+ "mapreduce.map.speculative": "false",
+ "mapreduce.reduce.shuffle.merge.percent": "0.66",
+ "yarn.app.mapreduce.am.resource.mb": "683",
+ "mapreduce.map.java.opts": "-Xmx273m",
+ "mapreduce.cluster.administrators": " hadoop",
+ "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+ "mapreduce.job.reduce.slowstart.completedmaps": "0.05",
+ "mapreduce.output.fileoutputformat.compress.type": "BLOCK",
+ "mapreduce.reduce.speculative": "false",
+ "mapreduce.reduce.java.opts": "-Xmx546m",
+ "mapreduce.am.max-attempts": "2",
+ "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.reduce.log.level": "INFO",
+ "mapreduce.map.sort.spill.percent": "0.7",
+ "mapreduce.task.io.sort.mb": "136",
+ "mapreduce.task.timeout": "300000",
+ "mapreduce.map.memory.mb": "341",
+ "mapreduce.task.io.sort.factor": "100",
+ "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+ "mapreduce.reduce.memory.mb": "683",
+ "mapreduce.jobhistory.principal": "jhs/_HOST@EXAMPLE.COM",
+ "yarn.app.mapreduce.am.log.level": "INFO",
+ "mapreduce.map.log.level": "INFO",
+ "mapreduce.shuffle.port": "13562",
+ "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+ "mapreduce.map.output.compress": "false",
+ "yarn.app.mapreduce.am.staging-dir": "/user",
+ "mapreduce.reduce.shuffle.parallelcopies": "30",
+ "mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
+ "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888",
+ "mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab",
+ "mapreduce.jobhistory.done-dir": "/mr-history/done",
+ "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+ "yarn.app.mapreduce.am.command-opts": "-Xmx546m",
+ "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+ },
+ "oozie-site": {
+ "oozie.service.PurgeService.purge.interval": "3600",
+ "oozie.service.CallableQueueService.queue.size": "1000",
+ "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd",
+ "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+ "oozie.service.HadoopAccessorService.nameNode.whitelist": " ",
+ "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver",
+ "local.realm": "EXAMPLE.COM",
+ "use.system.libpath.for.mapreduce.and.pig.jobs": "false",
+ "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
+ "oozie.db.schema.name": "oozie",
+ "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
+ "oozie.service.JPAService.create.db.schema": "false",
+ "oozie.authentication.kerberos.name.rules": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nDEFAULT",
+ "oozie.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "oozie.service.ActionService.executor.ext.classes": "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor",
+ "oozie.service.HadoopAccessorService.kerberos.principal": "oozie/c6402.ambari.apache.org@EXAMPLE.COM",
+ "oozie.service.AuthorizationService.authorization.enabled": "true",
+ "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie",
+ "oozie.service.JPAService.jdbc.password": "asd",
+ "oozie.service.coord.normal.default.timeout": "120",
+ "oozie.service.AuthorizationService.security.enabled": "true",
+ "oozie.service.JPAService.pool.max.active.conn": "10",
+ "oozie.service.PurgeService.older.than": "30",
+ "oozie.service.coord.push.check.requeue.interval": "30000",
+ "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf",
+ "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ",
+ "oozie.service.CallableQueueService.callable.concurrency": "3",
+ "oozie.service.JPAService.jdbc.username": "oozie",
+ "oozie.service.CallableQueueService.threads": "10",
+ "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
+ "oozie.systemmode": "NORMAL",
+ "oozie.service.HadoopAccessorService.keytab.file": "/etc/security/keytabs/oozie.service.keytab",
+ "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib",
+ "oozie.services": "\n org.apache.oozie.service.SchedulerService,\n org.apache.oozie.service.InstrumentationService,\n org.apache.oozie.service.CallableQueueService,\n org.apache.oozie.service.UUIDService,\n org.apache.oozie.service.ELService,\n org.apache.oozie.service.AuthorizationService,\n org.apache.oozie.service.UserGroupInformationService,\n org.apache.oozie.service.HadoopAccessorService,\n org.apache.oozie.service.URIHandlerService,\n org.apache.oozie.service.MemoryLocksService,\n org.apache.oozie.service.DagXLogInfoService,\n org.apache.oozie.service.SchemaService,\n org.apache.oozie.service.LiteWorkflowAppService,\n org.apache.oozie.service.JPAService,\n org.apache.oozie.service.StoreService,\n org.apache.oozie.service.CoordinatorStoreService,\n org.apache.oozie.service.SLAStoreService,\n org.apache.oozie.service.DBLiteWorkflowStoreServic
e,\n org.apache.oozie.service.CallbackService,\n org.apache.oozie.service.ActionService,\n org.apache.oozie.service.ActionCheckerService,\n org.apache.oozie.service.RecoveryService,\n org.apache.oozie.service.PurgeService,\n org.apache.oozie.service.CoordinatorEngineService,\n org.apache.oozie.service.BundleEngineService,\n org.apache.oozie.service.DagEngineService,\n org.apache.oozie.service.CoordMaterializeTriggerService,\n org.apache.oozie.service.StatusTransitService,\n org.apache.oozie.service.PauseTransitService,\n org.apache.oozie.service.GroupsService,\n org.apache.oozie.service.ProxyUserService",
+ "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
+ "oozie.authentication.type": "kerberos",
+ "oozie.authentication.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM",
+ "oozie.system.id": "oozie-${user.name}",
+ "oozie.https.port": "11443",
+ "oozie.https.keystore.file": "/home/oozie/oozie.keystore",
+ "oozie.https.keystore.pass": "password"
+ },
+ "storm-site": {
+ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+ "topology.workers": "1",
+ "drpc.worker.threads": "64",
+ "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']",
+ "supervisor.heartbeat.frequency.secs": "5",
+ "topology.executor.send.buffer.size": "1024",
+ "drpc.childopts": "-Xmx768m",
+ "nimbus.thrift.port": "6627",
+ "storm.zookeeper.retry.intervalceiling.millis": "30000",
+ "storm.local.dir": "/hadoop/storm",
+ "topology.receiver.buffer.size": "8",
+ "storm.messaging.netty.client_worker_threads": "1",
+ "transactional.zookeeper.root": "/transactional",
+ "drpc.request.timeout.secs": "600",
+ "topology.skip.missing.kryo.registrations": "false",
+ "worker.heartbeat.frequency.secs": "1",
+ "zmq.hwm": "0",
+ "storm.zookeeper.connection.timeout": "15000",
+ "topology.max.error.report.per.interval": "5",
+ "storm.messaging.netty.server_worker_threads": "1",
+ "supervisor.worker.start.timeout.secs": "120",
+ "zmq.threads": "1",
+ "topology.acker.executors": "null",
+ "storm.local.mode.zmq": "false",
+ "topology.max.task.parallelism": "null",
+ "storm.zookeeper.port": "2181",
+ "nimbus.childopts": "-Xmx1024m",
+ "worker.childopts": "-Xmx768m",
+ "drpc.queue.size": "128",
+ "storm.zookeeper.retry.times": "5",
+ "nimbus.monitor.freq.secs": "10",
+ "storm.cluster.mode": "distributed",
+ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+ "drpc.invocations.port": "3773",
+ "storm.zookeeper.root": "/storm",
+ "logviewer.childopts": "-Xmx128m",
+ "transactional.zookeeper.port": "null",
+ "topology.worker.childopts": "null",
+ "topology.max.spout.pending": "null",
+ "nimbus.cleanup.inbox.freq.secs": "600",
+ "storm.messaging.netty.min_wait_ms": "100",
+ "nimbus.task.timeout.secs": "30",
+ "nimbus.thrift.max_buffer_size": "1048576",
+ "topology.sleep.spout.wait.strategy.time.ms": "1",
+ "topology.optimize": "true",
+ "nimbus.reassign": "true",
+ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+ "logviewer.appender.name": "A1",
+ "nimbus.host": "c6401.ambari.apache.org",
+ "ui.port": "8744",
+ "supervisor.slots.ports": "[6700, 6701]",
+ "nimbus.file.copy.expiration.secs": "600",
+ "supervisor.monitor.frequency.secs": "3",
+ "ui.childopts": "-Xmx768m",
+ "transactional.zookeeper.servers": "null",
+ "zmq.linger.millis": "5000",
+ "topology.error.throttle.interval.secs": "10",
+ "topology.worker.shared.thread.pool.size": "4",
+ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+ "task.heartbeat.frequency.secs": "3",
+ "topology.transfer.buffer.size": "1024",
+ "storm.zookeeper.session.timeout": "20000",
+ "topology.executor.receive.buffer.size": "1024",
+ "topology.stats.sample.rate": "0.05",
+ "topology.fall.back.on.java.serialization": "true",
+ "supervisor.childopts": "-Xmx256m",
+ "topology.enable.message.timeouts": "true",
+ "storm.messaging.netty.max_wait_ms": "1000",
+ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+ "nimbus.supervisor.timeout.secs": "60",
+ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+ "nimbus.inbox.jar.expiration.secs": "3600",
+ "drpc.port": "3772",
+ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+ "storm.zookeeper.retry.interval": "1000",
+ "storm.messaging.netty.max_retries": "30",
+ "topology.tick.tuple.freq.secs": "null",
+ "supervisor.enable": "true",
+ "nimbus.task.launch.secs": "120",
+ "task.refresh.poll.secs": "10",
+ "topology.message.timeout.secs": "30",
+ "storm.messaging.netty.buffer_size": "5242880",
+ "topology.state.synchronization.timeout.secs": "60",
+ "supervisor.worker.timeout.secs": "30",
+ "topology.trident.batch.emit.interval.millis": "500",
+ "topology.builtin.metrics.bucket.size.secs": "60",
+ "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin",
+ "logviewer.port": "8000",
+ "topology.debug": "false"
+ },
+ "webhcat-site": {
+ "templeton.pig.path": "pig.tar.gz/pig/bin/pig",
+ "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/c6402.ambari.apache.org@EXAMPLE.COM",
+ "templeton.override.enabled": "false",
+ "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+ "templeton.kerberos.secret": "secret",
+ "templeton.kerberos.principal": "HTTP/c6402.ambari.apache.org@EXAMPLE.COM",
+ "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181",
+ "templeton.exec.timeout": "60000",
+ "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+ "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz",
+ "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar",
+ "templeton.port": "50111",
+ "templeton.hadoop.conf.dir": "/etc/hadoop/conf",
+ "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar",
+ "templeton.hadoop": "/usr/bin/hadoop",
+ "templeton.hive.path": "hive.tar.gz/hive/bin/hive",
+ "templeton.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "templeton.hcat": "/usr/bin/hcat",
+ "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz",
+ "templeton.sqoop.archive": "hdfs:///apps/webhcat/sqoop.tar.gz"
+ },
+ "capacity-scheduler": {
+ "yarn.scheduler.capacity.node-locality-delay": "40",
+ "yarn.scheduler.capacity.root.capacity": "100",
+ "yarn.scheduler.capacity.root.acl_administer_queue": "*",
+ "yarn.scheduler.capacity.root.queues": "default",
+ "yarn.scheduler.capacity.maximum-applications": "10000",
+ "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+ "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+ "yarn.scheduler.capacity.root.default.state": "RUNNING",
+ "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+ "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+ "yarn.scheduler.capacity.root.default.capacity": "100",
+ "yarn.scheduler.capacity.root.default.acl_submit_applications": "*"
+ },
+ "hdfs-site": {
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+ "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
+ "dfs.block.access.token.enable": "true",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+ "dfs.cluster.administrators": " hdfs",
+ "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
+ "ambari.dfs.datanode.http.port": "1022",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1.0f",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.permissions.enabled": "true",
+ "fs.checkpoint.size": "67108864",
+ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+ "dfs.https.port": "50470",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+ "dfs.blocksize": "134217728",
+ "dfs.datanode.max.transfer.threads": "1024",
+ "dfs.heartbeat.interval": "3",
+ "dfs.replication": "3",
+ "dfs.namenode.handler.count": "100",
+ "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+ "fs.permissions.umask-mode": "022",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
+ "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
+ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.namenode.accesstime.precision": "0",
+ "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "ambari.dfs.datanode.port": "1019",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
+ "dfs.datanode.http.address": "0.0.0.0:1022",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.datanode.du.reserved": "1073741824",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+ "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
+ "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.replication.max": "50",
+ "dfs.namenode.checkpoint.period": "21600"
+ },
+ "hbase-site": {
+ "hbase.hstore.flush.retries.number": "120",
+ "hbase.client.keyvalue.maxsize": "10485760",
+ "hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
+ "phoenix.queryserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
+ "hbase.hstore.compactionThreshold": "3",
+ "hbase.bulkload.staging.dir": "/apps/hbase/staging",
+ "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data",
+ "hbase.regionserver.handler.count": "60",
+ "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+ "hbase.master.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
+ "hbase.hregion.memstore.block.multiplier": "2",
+ "hbase.hregion.memstore.flush.size": "134217728",
+ "hbase.superuser": "hbase",
+ "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController",
+ "hbase.zookeeper.property.clientPort": "2181",
+ "hbase.hregion.max.filesize": "10737418240",
+ "hbase.regionserver.global.memstore.upperLimit": "0.4",
+ "zookeeper.session.timeout": "30000",
+ "hbase.tmp.dir": "/hadoop/hbase",
+ "hbase.local.dir": "${hbase.tmp.dir}/local",
+ "hfile.block.cache.size": "0.40",
+ "hbase.regionserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
+ "phoenix.queryserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
+ "hbase.security.authentication": "kerberos",
+ "hbase.defaults.for.version.skip": "true",
+ "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org",
+ "zookeeper.znode.parent": "/hbase-secure",
+ "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
+ "hbase.hstore.blockingStoreFiles": "10",
+ "hbase.master.port": "60000",
+ "hbase.hregion.majorcompaction": "86400000",
+ "hbase.security.authorization": "true",
+ "hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
+ "hbase.cluster.distributed": "true",
+ "hbase.hregion.memstore.mslab.enabled": "true",
+ "hbase.client.scanner.caching": "100",
+ "hbase.zookeeper.useMulti": "true"
+ },
+ "core-site": {
+ "gluster.daemon.user": "null",
+ "hadoop.security.authorization": "true",
+ "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org",
+ "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org",
+ "hadoop.proxyuser.hcat.groups": "users",
+ "fs.trash.interval": "360",
+ "hadoop.proxyuser.hive.groups": "users",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+ "hadoop.proxyuser.HTTP.hosts": "c6402.ambari.apache.org",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "hadoop.security.authentication": "kerberos",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "hadoop.proxyuser.HTTP.groups": "users",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "hadoop.proxyuser.oozie.groups": "users",
+ "fs.AbstractFileSystem.glusterfs.impl": "null",
+ "ipc.client.connect.max.retries": "50",
+ "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT",
+ "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org",
+ "ipc.client.connection.maxidletime": "30000"
+ },
+ "ranger-hdfs-plugin-properties" : {
+ "ranger-hdfs-plugin-enabled":"No"
+ },
+ "ranger-hive-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "ranger-hive-plugin-enabled": "No",
+ "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
+ "REPOSITORY_CONFIG_USERNAME": "hive",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hive",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-knox-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "KNOX_HOME": "/usr/hdp/current/knox-server",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "ranger-knox-plugin-enabled": "No",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "admin",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "admin-password",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "hive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "true",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.server2.authentication": "KERBEROS",
+ "hive.metastore.sasl.enabled": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.server2.transport.mode": "binary",
+ "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hive-interactive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "true",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9084",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.server2.authentication": "KERBEROS",
+ "hive.metastore.sasl.enabled": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.server2.transport.mode": "binary",
+ "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hiveserver2-site": {
+ "hive.metastore.metrics.enabled": "true",
+ "hive.service.metrics.reporter": "HADOOP2"
+ },
+ "yarn-site": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+ "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
+ "yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM",
+ "yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+ "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025",
+ "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+ "yarn.resourcemanager.hostname": "c6402.ambari.apache.org",
+ "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+ "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+ "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
+ "yarn.resourcemanager.nodes.include-path": "/etc/hadoop/conf_for_include/yarn.include",
+ "yarn.scheduler.minimum-allocation-mb": "683",
+ "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050",
+ "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030",
+ "yarn.log-aggregation.retain-seconds": "2592000",
+ "yarn.scheduler.maximum-allocation-mb": "2048",
+ "yarn.log-aggregation-enable": "true",
+ "yarn.nodemanager.address": "0.0.0.0:45454",
+ "yarn.nodemanager.container-monitor.interval-ms": "3000",
+ "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+ "yarn.nodemanager.log-aggregation.compression-type": "gz",
+ "yarn.nodemanager.log.retain-seconds": "604800",
+ "yarn.nodemanager.delete.debug-delay-sec": "0",
+ "yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log",
+ "yarn.nodemanager.health-checker.interval-ms": "135000",
+ "yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+ "yarn.resourcemanager.am.max-attempts": "2",
+ "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+ "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+ "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+ "yarn.nodemanager.vmem-check-enabled": "false",
+ "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+ "yarn.admin.acl": "*",
+ "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088",
+ "yarn.nodemanager.resource.memory-mb": "2048",
+ "yarn.nodemanager.linux-container-executor.group": "hadoop",
+ "yarn.acl.enable": "true",
+ "manage.include.files": "false",
+ "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs",
+ "yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
+ "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+ "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141",
+ "yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
+ "yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab",
+ "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
+ "yarn.timeline-service.principal": "yarn/_HOST@EXAMPLE.COM",
+ "yarn.timeline-service.keytab" : "/etc/security/keytabs/yarn.service.keytab",
+ "yarn.http.policy": "HTTP_ONLY",
+ "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090"
+ },
+ "ranger-yarn-plugin-properties" : {
+ "ranger-yarn-plugin-enabled":"No"
+ },
+ "yarn-env": {
+ "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+ "apptimelineserver_heapsize": "1024",
+ "nodemanager_heapsize": "1024",
+ "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n #echo \"run java in $JAVA_HOME\"\n JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n echo \"Error: JAVA_HOME is not set.\"\n exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n JAVA_HEAP_M
AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to
specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
{YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
+ "yarn_heapsize": "1024",
+ "yarn_user": "yarn",
+ "resourcemanager_heapsize": "1024",
+ "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+ "min_user_id": "1000",
+ "is_supported_yarn_ranger": "false",
+ "service_check.queue.name": "default"
+ },
+ "cluster-env": {
+ "managed_hdfs_resource_property_names": "",
+ "security_enabled": "true",
+ "ignore_groupsusers_create": "false",
+ "smokeuser": "ambari-qa",
+ "smokeuser_principal_name": "ambari-qa@EXAMPLE.COM",
+ "kerberos_domain": "EXAMPLE.COM",
+ "user_group": "hadoop",
+ "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab",
+ "kinit_path_local": "/usr/bin",
+ "manage_dirs_on_root": "true",
+ "ignore_bad_mounts": "false"
+ },
+ "hadoop-env": {
+ "hdfs_tmp_dir": "/tmp",
+ "namenode_opt_maxnewsize": "200m",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "200m",
+ "namenode_opt_permsize" : "128m",
+ "namenode_opt_maxpermsize" : "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by defaul
t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
OOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "hdfs_user": "hdfs",
+ "hdfs_principal_name": "hdfs",
+ "dtnode_heapsize": "1024m",
+ "proxyuser_group": "users",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop",
+ "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
+ },
+ "hive-env": {
+ "hcat_pid_dir": "/var/run/webhcat",
+ "hcat_user": "hcat",
+ "hive_ambari_database": "MySQL",
+ "hive_hostname": "abtest-3.c.pramod-thangali.internal",
+ "hive_metastore_port": "9083",
+ "webhcat_user": "hcat",
+ "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}",
+ "hive_database_name": "hive",
+ "hive_database_type": "mysql",
+ "hive_pid_dir": "/var/run/hive",
+ "hive_log_dir": "/var/log/hive",
+ "hive_user": "hive",
+ "hcat_log_dir": "/var/log/webhcat",
+ "hive_database": "New MySQL Database",
+ "hive_security_authorization": "None"
+ },
+ "hbase-env": {
+ "hbase_pid_dir": "/var/run/hbase",
+ "hbase_user": "hbase",
+ "hbase_master_heapsize": "1024m",
+ "hbase_principal_name": "hbase",
+ "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintG
CDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n#
Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\
"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}",
+ "hbase_regionserver_heapsize": "1024m",
+ "hbase_regionserver_xmn_max": "512",
+ "hbase_regionserver_xmn_ratio": "0.2",
+ "hbase_log_dir": "/var/log/hbase",
+ "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab",
+ "hbase_java_io_tmpdir" : "/tmp",
+ "hbase_regionserver_shutdown_timeout": "30"
+ },
+ "flume-env": {
+ "content": "export JAVA_HOME={{java64_home}}",
+ "flume_user": "flume"
+ },
+ "ganglia-env": {
+ "gmond_user": "nobody",
+ "ganglia_runtime_dir": "/var/run/ganglia/hdp",
+ "rrdcached_base_dir": "/var/lib/ganglia/rrds",
+ "rrdcached_flush_timeout": "7200",
+ "gmetad_user": "nobody",
+ "rrdcached_write_threads": "4",
+ "rrdcached_delay": "1800",
+ "rrdcached_timeout": "3600"
+ },
+ "zookeeper-env": {
+ "zk_user": "zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "zk_pid_dir": "/var/run/zookeeper",
+ "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM",
+ "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab"
+ },
+ "zoo.cfg": {
+ "clientPort": "2181",
+ "syncLimit": "5",
+ "initLimit": "10",
+ "dataDir": "/hadoop/zookeeper",
+ "tickTime": "2000"
+ },
+ "mapred-env": {
+ "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.",
+ "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
+ "mapred_user": "mapred",
+ "jobhistory_heapsize": "900",
+ "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+ },
+ "oozie-env": {
+ "oozie_derby_database": "Derby",
+ "oozie_admin_port": "11001",
+ "oozie_hostname": "abtest-3.c.pramod-thangali.internal",
+ "oozie_pid_dir": "/var/run/oozie",
+ "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie config
uration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "oozie_user": "oozie",
+ "oozie_admin_users": "{oozie_user}, {oozie_user}-admin",
+ "oozie_database": "New Derby Database",
+ "oozie_data_dir": "/hadoop/oozie/data",
+ "oozie_log_dir": "/var/log/oozie",
+ "oozie_keytab": "/etc/security/keytabs/oozie.service.keytab",
+ "oozie_tmp_dir": "/var/tmp/oozie"
+ },
+ "ranger-env": {
+ "xml_configurations_supported" : "false"
+ },
+
+ "webhcat-env": {
+ "content": "\n# The file containing the running pid\nPID_FILE={{pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=/usr/lib/hadoop"
+ },
+ "hcat-env": {
+ "content": "JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}"
+ },
+ "pig-env": {
+ "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+ },
+ "sqoop-env": {
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
+ "sqoop_user": "sqoop"
+ },
+ "hdfs-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "yarn-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hbase-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-exec-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "webhcat-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "zookeeper-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-properties": {
+ "content": "pigproperties\nline2"
+ },
+ "oozie-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "ranger-hbase-plugin-properties": {
+ "POLICY_MGR_URL": "{{policymgr_mgr_url}}",
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "ranger-hbase-plugin-enabled": "No",
+ "REPOSITORY_NAME": "{{repo_name}}",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}",
+ "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.DB.PASSWORD": "{{xa_audit_db_password}}",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hbase",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.DB.FLAVOUR": "{{xa_audit_db_flavor}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hbase",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-hdfs-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "hadoop.rpc.protection": "-",
+ "ranger-hdfs-plugin-enabled": "No",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hadoop",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "hadoop-metrics2.properties": {
+ "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\r\n# contributor license agreements. See the NOTICE file distributed with\r\n# this work for additional information regarding copyright ownership.\r\n# The ASF licenses this file to You under the Apache License, Version 2.0\r\n# (the \"License\"); you may not use this file except in compliance with\r\n# the License. You may obtain a copy of the License at\r\n#\r\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# syntax: [prefix].[source|sink|jmx].[instance].[options]\r\n# See package.html for org.apache.hadoop.metrics2 for details\r\n\r\n{% if has_gang
lia_server %}\r\n*.period=60\r\n\r\n*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31\r\n*.sink.ganglia.period=10\r\n\r\n# default for supportsparse is false\r\n*.sink.ganglia.supportsparse=true\r\n\r\n.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both\r\n.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40\r\n\r\n# Hook up to the server\r\nnamenode.sink.ganglia.servers={{ganglia_server_host}}:8661\r\ndatanode.sink.ganglia.servers={{ganglia_server_host}}:8659\r\njobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662\r\ntasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658\r\nmaptask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nreducetask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nresourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664\r\nnodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657\r\nhistoryserver.sink.ganglia.servers={{ganglia_server_host}}:8666\r\njo
urnalnode.sink.ganglia.servers={{ganglia_server_host}}:8654\r\nnimbus.sink.ganglia.servers={{ganglia_server_host}}:8649\r\nsupervisor.sink.ganglia.servers={{ganglia_server_host}}:8650\r\n\r\nresourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue\r\n\r\n{% endif %}\r\n\r\n{% if has_metric_collector %}\r\n\r\n*.period={{metrics_collection_period}}\r\n*.sink.timeline.plugin.urls=file:\/\/\/usr\/lib\/ambari-metrics-hadoop-sink\/ambari-metrics-hadoop-sink.jar\r\n*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink\r\n*.sink.timeline.period={{metrics_collection_period}}\r\n*.sink.timeline.sendInterval={{metrics_report_interval}}000\r\n*.sink.timeline.slave.host.name={{hostname}}\r\n*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}\r\n*.sink.timeline.protocol={{metric_collector_protocol}}\r\n*.sink.timeline.port={{metric_collector_port}}\r\n\r\n# HTTPS properties\r\n*.sink.timeline.truststore.path = {{metric_truststore_path}}\r\n*.sink.timeline.trusts
tore.type = {{metric_truststore_type}}\r\n*.sink.timeline.truststore.password = {{metric_truststore_password}}\r\n\r\ndatanode.sink.timeline.collector={{metric_collector_hosts}}\r\nnamenode.sink.timeline.collector={{metric_collector_hosts}}\r\nresourcemanager.sink.timeline.collector={{metric_collector_hosts}}\r\nnodemanager.sink.timeline.collector={{metric_collector_hosts}}\r\njobhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\njournalnode.sink.timeline.collector={{metric_collector_hosts}}\r\nmaptask.sink.timeline.collector={{metric_collector_hosts}}\r\nreducetask.sink.timeline.collector={{metric_collector_hosts}}\r\napplicationhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\n\r\nresourcemanager.sink.timeline.tagsForPrefix.yarn=Queue\r\n\r\n{% if is_nn_client_port_configured %}\r\n# Namenode rpc ports customization\r\nnamenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}\r\n{% endif %}\r\n{% if is_nn_dn_port_configured %}\r\nnamen
ode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}\r\n{% endif %}\r\n{% if is_nn_healthcheck_port_configured %}\r\nnamenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}\r\n{% endif %}\r\n\r\n{% endif %}"
+ }
+ },
+ "configuration_attributes": {
+ "sqoop-site": {},
+ "yarn-site": {
+ "final": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
+ "yarn.nodemanager.container-executor.class": "true",
+ "yarn.nodemanager.local-dirs": "true"
+ }
+ },
+ "tez-site": {
+ "final": {
+ "tez.am.log.level": "true"
+ }
+ },
+ "capacity-scheduler": {
+ "final": {
+ "yarn.scheduler.capacity.node-locality-delay": "true"
+ }
+ },
+ "mapred-site": {
+ "final": {
+ "mapred.healthChecker.script.path": "true",
+ "mapreduce.jobtracker.staging.root.dir": "true"
+ }
+ },
+ "oozie-site": {
+ "final": {
+ "oozie.service.PurgeService.purge.interval": "true",
+ "oozie.service.CallableQueueService.queue.size": "true"
+ }
+ },
+ "webhcat-site": {
+ "final": {
+ "templeton.pig.path": "true",
+ "templeton.exec.timeout": "true",
+ "templeton.override.enabled": "true"
+ }
+ },
+ "hdfs-site": {
+ "final": {
+ "dfs.web.ugi": "true",
+ "dfs.support.append": "true",
+ "dfs.cluster.administrators": "true"
+ }
+ },
+ "hbase-site": {
+ "final": {
+ "hbase.client.keyvalue.maxsize": "true",
+ "hbase.hstore.compactionThreshold": "true",
+ "hbase.rootdir": "true"
+ }
+ },
+ "core-site": {
+ "final": {
+ "hadoop.proxyuser.hive.groups": "true",
+ "webinterface.private.actions": "true",
+ "hadoop.proxyuser.oozie.hosts": "true"
+ }
+ },
+ "hive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hiveserver2-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hive-interactive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ }
+ },
+ "configurationTags": {
+ "capacity-scheduler": {
+ "tag": "version1"
+ },
+ "oozie-site": {
+ "tag": "version1389972945450"
+ },
+ "storm-site": {
+ "tag": "version1"
+ },
+ "webhcat-site": {
+ "tag": "version1389972945450"
+ },
+ "global": {
+ "tag": "version1389972945449"
+ },
+ "mapred-site": {
+ "tag": "version1389972945450"
+ },
+ "hdfs-site": {
+ "tag": "version1389972945450"
+ },
+ "hbase-site": {
+ "tag": "version1389972945449"
+ },
+ "core-site": {
+ "tag": "version1389972945450"
+ },
+ "yarn-site": {
+ "tag": "version1389972945449"
+ },
+ "hive-site": {
+ "tag": "version1389972945450"
+ },
+ "hiveserver2-site": {
+ "tag": "version1389972945450"
+ },
+ "hive-interactive-site": {
+ "tag": "version1389972945450"
+ },
+ "hdfs-log4j": {
+ "tag": "version1389972945450"
+ },
+ "yarn-log4j": {
+ "tag": "version1389972945450"
+ },
+ "hbase-log4j": {
+ "tag": "version1389972945450"
+ },
+ "hive-log4j": {
+ "tag": "version1389972945450"
+ },
+ "hive-exec-log4j": {
+ "tag": "version1389972945450"
+ },
+ "zookeeper-log4j": {
+ "tag": "version1389972945450"
+ },
+ "oozie-log4j": {
+ "tag": "version1389972945450"
+ },
+ "pig-log4j": {
+ "tag": "version1389972945450"
+ }
+ },
+ "commandId": "9-1",
+ "clusterHostInfo": {
+ "ambari_server_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "snamenode_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nm_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "drpc_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "slave_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "logviewer_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_metastore_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "hbase_rs_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "webhcat_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "zookeeper_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "supervisor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_monitor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_ping_ports": [
+ "8670",
+ "8670"
+ ],
+ "rm_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "all_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_racks": [
+ "/default/rack_01",
+ "/default/rack_02"
+ ],
+ "all_ipv4_ips": [
+ "127.0.0.1",
+ "127.0.0.1"
+ ],
+ "storm_ui_server_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "oozie_server": [
+ "c6402.ambari.apache.org"
+ ],
+ "hs_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nimbus_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "namenode_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "hbase_master_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_mysql_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "falcon_server_hosts": [
+ "c6402.ambari.apache.org"
+ ]
+ }
+}
[44/50] [abbrv] ambari git commit: AMBARI-21793. Add support for
variables used by Knox topology template (Sandeep More via smohanty)
Posted by ja...@apache.org.
AMBARI-21793. Add support for variables used by Knox topology template (Sandeep More via smohanty)
(cherry picked from commit a21dc6d89a8243a34bf5fdc9df089bdfcaf40443)
Change-Id: I33ba56c9a98cb29266201e5b8f9da448b3de9507
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2bf87b8a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2bf87b8a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2bf87b8a
Branch: refs/heads/2.5-maint
Commit: 2bf87b8a27e9140fece7309ea2c57cc12eadd90b
Parents: 859673a
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Thu Aug 24 16:55:14 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Thu Aug 24 20:44:40 2017 -0700
----------------------------------------------------------------------
.../0.5.0.2.2/package/scripts/params_linux.py | 91 +++++++++++++++++++-
1 file changed, 87 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/2bf87b8a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
index f54abef..01126a8 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
@@ -240,6 +240,12 @@ if type(webhcat_server_hosts) is list:
else:
webhcat_server_host = webhcat_server_hosts
+hive_scheme = 'http'
+webhcat_scheme = 'http'
+
+hbase_master_scheme = 'http'
+hbase_master_ui_port = default('/configurations/hbase-site/hbase.master.info.port', "16010");
+
hbase_master_port = default('/configurations/hbase-site/hbase.rest.port', "8080")
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", None)
if type(hbase_master_hosts) is list:
@@ -247,8 +253,14 @@ if type(hbase_master_hosts) is list:
else:
hbase_master_host = hbase_master_hosts
+#
+# Oozie
+#
oozie_https_port = None
+oozie_scheme = 'http'
+oozie_server_port = "11000"
oozie_server_hosts = default("/clusterHostInfo/oozie_server", None)
+
if type(oozie_server_hosts) is list:
oozie_server_host = oozie_server_hosts[0]
else:
@@ -256,15 +268,86 @@ else:
oozie_scheme = 'http'
has_oozie = not oozie_server_host == None
-oozie_server_port = "11000"
if has_oozie:
oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
oozie_https_port = default("/configurations/oozie-site/oozie.https.port", None)
-
+
if oozie_https_port is not None:
- oozie_scheme = 'https'
- oozie_server_port = oozie_https_port
+ oozie_scheme = 'https'
+ oozie_server_port = oozie_https_port
+
+#
+# Falcon
+#
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", None)
+if type(falcon_server_hosts) is list:
+ falcon_server_host = falcon_server_hosts[0]
+else:
+ falcon_server_host = falcon_server_hosts
+
+falcon_scheme = 'http'
+has_falcon = not falcon_server_host == None
+falcon_server_port = "15000"
+
+if has_falcon:
+ falcon_server_port = config['configurations']['falcon-env']['falcon_port']
+
+#
+# Solr
+#
+solr_scheme='http'
+solr_server_hosts = default("/clusterHostInfo/solr_hosts", None)
+if type(solr_server_hosts ) is list:
+ solr_host = solr_server_hosts[0]
+else:
+ solr_host = solr_server_hosts
+solr_port=default("/configuration/solr/solr-env/solr_port","8983")
+
+#
+# Spark
+#
+spark_scheme = 'http'
+spark_historyserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", None)
+if type(spark_historyserver_hosts) is list:
+ spark_historyserver_host = spark_historyserver_hosts[0]
+else:
+ spark_historyserver_host = spark_historyserver_hosts
+spark_historyserver_ui_port = default("/configurations/spark-defaults/spark.history.ui.port", "18080")
+
+
+#
+# JobHistory mapreduce
+#
+mr_scheme='http'
+mr_historyserver_address = default("/configurations/mapred-site/mapreduce.jobhistory.webapp.address", None)
+
+#
+# Yarn nodemanager
+#
+nodeui_scheme= 'http'
+nodeui_port = "8042"
+nm_hosts = default("/clusterHostInfo/nm_hosts", None)
+if type(nm_hosts) is list:
+ nm_host = nm_hosts[0]
+else:
+ nm_host = nm_hosts
+
+has_yarn = default("/configurations/yarn-site", None )
+if has_yarn and 'yarn.nodemanager.webapp.address' in config['configurations']['yarn-site']:
+ nodeui_port = get_port_from_url(config['configurations']['yarn-site']['yarn.nodemanager.webapp.address'])
+
+
+#
+# Spark Thrift UI
+#
+spark_thriftserver_scheme = 'http'
+spark_thriftserver_ui_port = 4039
+spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", None)
+if type(spark_thriftserver_hosts) is list:
+ spark_thriftserver_host = spark_thriftserver_hosts[0]
+else:
+ spark_thriftserver_host = spark_thriftserver_hosts
# Knox managed properties
knox_managed_pid_symlink= format('{stack_root}/current/knox-server/pids')
[17/50] [abbrv] ambari git commit: AMBARI-21729 Owner of yarn.include
stays to be root:root where as dfs.include gets changed to the correct one
upon restart services (dsen)
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/6d2bab16/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_manage.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_manage.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_manage.json
new file mode 100644
index 0000000..aa2d3d5
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_yarn_include_file_manage.json
@@ -0,0 +1,1260 @@
+{
+ "localComponents": [
+ "NAMENODE",
+ "SECONDARY_NAMENODE",
+ "ZOOKEEPER_SERVER",
+ "DATANODE",
+ "HDFS_CLIENT",
+ "ZOOKEEPER_CLIENT",
+ "RESOURCEMANAGER",
+ "HISTORYSERVER",
+ "NODEMANAGER",
+ "YARN_CLIENT",
+ "MAPREDUCE2_CLIENT",
+ "SLIDER",
+ "PIG",
+ "SQOOP",
+ "HIVE_CLIENT",
+ "TEZ_CLIENT",
+ "HIVE_CLIENT",
+ "SPARK_CLIENT",
+ "SPARK2_CLIENT"
+ ],
+ "roleCommand": "SERVICE_CHECK",
+ "clusterName": "c1",
+ "hostname": "c6401.ambari.apache.org",
+ "hostLevelParams": {
+ "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+ "agent_stack_retry_count": "5",
+ "agent_stack_retry_on_unavailability": "false",
+ "agentCacheDir": "/var/lib/ambari-agent/cache",
+ "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+ "ambari_db_rca_password": "mapred",
+ "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+ "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+ "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+ "stack_version": "2.0",
+ "stack_name": "HDP",
+ "ambari_db_rca_driver": "org.postgresql.Driver",
+ "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+ "ambari_db_rca_username": "mapred",
+ "java_home": "/usr/jdk64/jdk1.7.0_45",
+ "java_version": "8",
+ "db_name": "ambari",
+ "user_group": "{\"sample\":[\"sample\",\"users\"]}",
+ "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
+ "user_list": "[\"hive\",\"oozie\",\"nobody\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]",
+ "custom_mysql_jdbc_name" : "mysql-connector-java.jar",
+ "custom_oracle_jdbc_name" : "oracle-jdbc-driver.jar",
+ "custom_postgres_jdbc_name" : "test-postgres-jdbc.jar",
+ "custom_sqlanywhere_jdbc_name" : "sqla-client-jdbc.tar.gz"
+ },
+ "commandType": "EXECUTION_COMMAND",
+ "roleParams": {},
+ "serviceName": "HIVE",
+ "role": "HIVE_SERVER",
+ "commandParams": {
+ "command_timeout": "300",
+ "service_package_folder": "OOZIE",
+ "script_type": "PYTHON",
+ "script": "scripts/service_check.py",
+ "excluded_hosts": "host1,host2",
+ "mark_draining_only" : "false",
+ "update_files_only" : "false",
+ "xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
+ "env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
+ "properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
+ "output_file":"HDFS_CLIENT-configs.tar.gz",
+ "refresh_topology": "True"
+ },
+ "taskId": 152,
+ "public_hostname": "c6401.ambari.apache.org",
+ "configurations": {
+ "sqoop-site": {
+ "atlas.cluster.name": "c1",
+ "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
+ },
+ "mapred-site": {
+ "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020",
+ "mapreduce.cluster.administrators": " hadoop",
+ "mapreduce.reduce.input.buffer.percent": "0.0",
+ "mapreduce.output.fileoutputformat.compress": "false",
+ "mapreduce.framework.name": "yarn",
+ "mapreduce.map.speculative": "false",
+ "mapreduce.reduce.shuffle.merge.percent": "0.66",
+ "yarn.app.mapreduce.am.resource.mb": "683",
+ "mapreduce.map.java.opts": "-Xmx273m",
+ "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+ "mapreduce.job.reduce.slowstart.completedmaps": "0.05",
+ "mapreduce.output.fileoutputformat.compress.type": "BLOCK",
+ "mapreduce.reduce.speculative": "false",
+ "mapreduce.reduce.java.opts": "-Xmx546m",
+ "mapreduce.am.max-attempts": "2",
+ "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.reduce.log.level": "INFO",
+ "mapreduce.map.sort.spill.percent": "0.7",
+ "mapreduce.task.timeout": "300000",
+ "mapreduce.map.memory.mb": "341",
+ "mapreduce.task.io.sort.factor": "100",
+ "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+ "mapreduce.reduce.memory.mb": "683",
+ "yarn.app.mapreduce.am.log.level": "INFO",
+ "mapreduce.map.log.level": "INFO",
+ "mapreduce.shuffle.port": "13562",
+ "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+ "mapreduce.map.output.compress": "false",
+ "yarn.app.mapreduce.am.staging-dir": "/user",
+ "mapreduce.reduce.shuffle.parallelcopies": "30",
+ "mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
+ "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888",
+ "mapreduce.jobhistory.done-dir": "/mr-history/done",
+ "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+ "mapreduce.task.io.sort.mb": "136",
+ "yarn.app.mapreduce.am.command-opts": "-Xmx546m",
+ "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+ },
+ "oozie-site": {
+ "oozie.service.PurgeService.purge.interval": "3600",
+ "oozie.service.CallableQueueService.queue.size": "1000",
+ "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd",
+ "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+ "oozie.service.HadoopAccessorService.nameNode.whitelist": " ",
+ "use.system.libpath.for.mapreduce.and.pig.jobs": "false",
+ "oozie.db.schema.name": "oozie",
+ "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
+ "oozie.service.JPAService.create.db.schema": "false",
+ "oozie.authentication.kerberos.name.rules": "\n RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n DEFAULT",
+ "oozie.service.ActionService.executor.ext.classes": "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor",
+ "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie",
+ "oozie.service.JPAService.jdbc.password": "asd",
+ "oozie.service.coord.normal.default.timeout": "120",
+ "oozie.service.AuthorizationService.security.enabled": "true",
+ "oozie.service.JPAService.pool.max.active.conn": "10",
+ "oozie.service.PurgeService.older.than": "30",
+ "oozie.service.coord.push.check.requeue.interval": "30000",
+ "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf",
+ "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ",
+ "oozie.service.CallableQueueService.callable.concurrency": "3",
+ "oozie.service.JPAService.jdbc.username": "oozie",
+ "oozie.service.CallableQueueService.threads": "10",
+ "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
+ "oozie.systemmode": "NORMAL",
+ "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib",
+ "oozie.services": "\n org.apache.oozie.service.SchedulerService,\n org.apache.oozie.service.InstrumentationService,\n org.apache.oozie.service.CallableQueueService,\n org.apache.oozie.service.UUIDService,\n org.apache.oozie.service.ELService,\n org.apache.oozie.service.AuthorizationService,\n org.apache.oozie.service.UserGroupInformationService,\n org.apache.oozie.service.HadoopAccessorService,\n org.apache.oozie.service.URIHandlerService,\n org.apache.oozie.service.MemoryLocksService,\n org.apache.oozie.service.DagXLogInfoService,\n org.apache.oozie.service.SchemaService,\n org.apache.oozie.service.LiteWorkflowAppService,\n org.apache.oozie.service.JPAService,\n org.apache.oozie.service.StoreService,\n org.apache.oozie.service.CoordinatorStoreService,\n org.apache.oozie.service.SLAStoreService,\n org.apache.oozie.service.DBLiteWorkflowStoreService,\n
org.apache.oozie.service.CallbackService,\n org.apache.oozie.service.ActionService,\n org.apache.oozie.service.ActionCheckerService,\n org.apache.oozie.service.RecoveryService,\n org.apache.oozie.service.PurgeService,\n org.apache.oozie.service.CoordinatorEngineService,\n org.apache.oozie.service.BundleEngineService,\n org.apache.oozie.service.DagEngineService,\n org.apache.oozie.service.CoordMaterializeTriggerService,\n org.apache.oozie.service.StatusTransitService,\n org.apache.oozie.service.PauseTransitService,\n org.apache.oozie.service.GroupsService,\n org.apache.oozie.service.ProxyUserService",
+ "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
+ "oozie.authentication.type": "simple",
+ "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver",
+ "oozie.system.id": "oozie-${user.name}"
+ },
+ "storm-site": {
+ "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+ "topology.workers": "1",
+ "drpc.worker.threads": "64",
+ "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']",
+ "supervisor.heartbeat.frequency.secs": "5",
+ "topology.executor.send.buffer.size": "1024",
+ "drpc.childopts": "-Xmx768m",
+ "nimbus.thrift.port": "6627",
+ "storm.zookeeper.retry.intervalceiling.millis": "30000",
+ "storm.local.dir": "/hadoop/storm",
+ "topology.receiver.buffer.size": "8",
+ "storm.messaging.netty.client_worker_threads": "1",
+ "transactional.zookeeper.root": "/transactional",
+ "drpc.request.timeout.secs": "600",
+ "topology.skip.missing.kryo.registrations": "false",
+ "worker.heartbeat.frequency.secs": "1",
+ "zmq.hwm": "0",
+ "storm.zookeeper.connection.timeout": "15000",
+ "topology.max.error.report.per.interval": "5",
+ "storm.messaging.netty.server_worker_threads": "1",
+ "supervisor.worker.start.timeout.secs": "120",
+ "zmq.threads": "1",
+ "topology.acker.executors": "null",
+ "storm.local.mode.zmq": "false",
+ "topology.max.task.parallelism": "null",
+ "storm.zookeeper.port": "2181",
+ "nimbus.childopts": "-Xmx1024m",
+ "worker.childopts": "-Xmx768m",
+ "drpc.queue.size": "128",
+ "storm.zookeeper.retry.times": "5",
+ "nimbus.monitor.freq.secs": "10",
+ "storm.cluster.mode": "distributed",
+ "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+ "drpc.invocations.port": "3773",
+ "storm.zookeeper.root": "/storm",
+ "logviewer.childopts": "-Xmx128m",
+ "transactional.zookeeper.port": "null",
+ "topology.worker.childopts": "null",
+ "topology.max.spout.pending": "null",
+ "nimbus.cleanup.inbox.freq.secs": "600",
+ "storm.messaging.netty.min_wait_ms": "100",
+ "nimbus.task.timeout.secs": "30",
+ "nimbus.thrift.max_buffer_size": "1048576",
+ "topology.sleep.spout.wait.strategy.time.ms": "1",
+ "topology.optimize": "true",
+ "nimbus.reassign": "true",
+ "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+ "logviewer.appender.name": "A1",
+ "nimbus.host": "c6401.ambari.apache.org",
+ "ui.port": "8744",
+ "supervisor.slots.ports": "[6700, 6701]",
+ "nimbus.file.copy.expiration.secs": "600",
+ "supervisor.monitor.frequency.secs": "3",
+ "ui.childopts": "-Xmx768m",
+ "transactional.zookeeper.servers": "null",
+ "zmq.linger.millis": "5000",
+ "topology.error.throttle.interval.secs": "10",
+ "topology.worker.shared.thread.pool.size": "4",
+ "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+ "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+ "task.heartbeat.frequency.secs": "3",
+ "topology.transfer.buffer.size": "1024",
+ "storm.zookeeper.session.timeout": "20000",
+ "topology.executor.receive.buffer.size": "1024",
+ "topology.stats.sample.rate": "0.05",
+ "topology.fall.back.on.java.serialization": "true",
+ "supervisor.childopts": "-Xmx256m",
+ "topology.enable.message.timeouts": "true",
+ "storm.messaging.netty.max_wait_ms": "1000",
+ "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+ "nimbus.supervisor.timeout.secs": "60",
+ "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+ "nimbus.inbox.jar.expiration.secs": "3600",
+ "drpc.port": "3772",
+ "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+ "storm.zookeeper.retry.interval": "1000",
+ "storm.messaging.netty.max_retries": "30",
+ "topology.tick.tuple.freq.secs": "null",
+ "supervisor.enable": "true",
+ "nimbus.task.launch.secs": "120",
+ "task.refresh.poll.secs": "10",
+ "topology.message.timeout.secs": "30",
+ "storm.messaging.netty.buffer_size": "5242880",
+ "topology.state.synchronization.timeout.secs": "60",
+ "supervisor.worker.timeout.secs": "30",
+ "topology.trident.batch.emit.interval.millis": "500",
+ "topology.builtin.metrics.bucket.size.secs": "60",
+ "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin",
+ "logviewer.port": "8000",
+ "topology.debug": "false"
+ },
+ "webhcat-site": {
+ "templeton.pig.path": "pig.tar.gz/pig/bin/pig",
+ "templeton.exec.timeout": "60000",
+ "templeton.override.enabled": "false",
+ "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+ "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181",
+ "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse",
+ "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+ "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz",
+ "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar",
+ "templeton.port": "50111",
+ "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar",
+ "templeton.hadoop": "/usr/bin/hadoop",
+ "templeton.hive.path": "hive.tar.gz/hive/bin/hive",
+ "templeton.hadoop.conf.dir": "/etc/hadoop/conf",
+ "templeton.hcat": "/usr/bin/hcat",
+ "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz",
+ "templeton.sqoop.archive": "hdfs:///apps/webhcat/sqoop.tar.gz"
+ },
+ "capacity-scheduler": {
+ "yarn.scheduler.capacity.node-locality-delay": "40",
+ "yarn.scheduler.capacity.root.capacity": "100",
+ "yarn.scheduler.capacity.root.acl_administer_queue": "*",
+ "yarn.scheduler.capacity.root.queues": "default",
+ "yarn.scheduler.capacity.maximum-applications": "10000",
+ "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+ "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+ "yarn.scheduler.capacity.root.default.state": "RUNNING",
+ "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+ "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+ "yarn.scheduler.capacity.root.default.capacity": "100",
+ "yarn.scheduler.capacity.root.default.acl_submit_applications": "*"
+ },
+ "hdfs-site": {
+ "dfs.namenode.checkpoint.period": "21600",
+ "dfs.namenode.avoid.write.stale.datanode": "true",
+ "dfs.block.access.token.enable": "true",
+ "dfs.support.append": "true",
+ "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+ "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
+ "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+ "dfs.cluster.administrators": "test_user1,test_user2 hdfs,test_group",
+ "dfs.replication": "3",
+ "ambari.dfs.datanode.http.port": "50075",
+ "dfs.datanode.balance.bandwidthPerSec": "6250000",
+ "dfs.namenode.safemode.threshold-pct": "1.0f",
+ "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+ "dfs.permissions.enabled": "true",
+ "fs.checkpoint.size": "67108864",
+ "dfs.client.read.shortcircuit": "true",
+ "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+ "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+ "dfs.blocksize": "134217728",
+ "dfs.datanode.max.transfer.threads": "1024",
+ "dfs.datanode.du.reserved": "1073741824",
+ "dfs.webhdfs.enabled": "true",
+ "dfs.namenode.handler.count": "100",
+ "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary,/hadoop/hdfs/namesecondary2",
+ "fs.permissions.umask-mode": "022",
+ "dfs.datanode.http.address": "0.0.0.0:50075",
+ "dfs.datanode.ipc.address": "0.0.0.0:8010",
+ "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+ "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+ "dfs.blockreport.initialDelay": "120",
+ "dfs.datanode.failed.volumes.tolerated": "0",
+ "dfs.namenode.accesstime.precision": "0",
+ "ambari.dfs.datanode.port": "50010",
+ "dfs.namenode.avoid.read.stale.datanode": "true",
+ "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+ "dfs.namenode.stale.datanode.interval": "30000",
+ "dfs.heartbeat.interval": "3",
+ "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+ "dfs.permissions.superusergroup": "hdfs",
+ "dfs.https.port": "50470",
+ "dfs.journalnode.http-address": "0.0.0.0:8480",
+ "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+ "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+ "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+ "dfs.datanode.data.dir.perm": "750",
+ "dfs.namenode.name.dir.restore": "true",
+ "dfs.replication.max": "50",
+ "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+ },
+ "hbase-site": {
+ "hbase.hstore.flush.retries.number": "120",
+ "hbase.client.keyvalue.maxsize": "10485760",
+ "hbase.hstore.compactionThreshold": "3",
+ "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data",
+ "hbase.regionserver.handler.count": "60",
+ "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+ "hbase.hregion.memstore.block.multiplier": "2",
+ "hbase.hregion.memstore.flush.size": "134217728",
+ "hbase.superuser": "hbase",
+ "hbase.zookeeper.property.clientPort": "2181",
+ "hbase.regionserver.global.memstore.upperLimit": "0.4",
+ "zookeeper.session.timeout": "30000",
+ "hbase.tmp.dir": "/hadoop/hbase",
+ "hbase.local.dir": "${hbase.tmp.dir}/local",
+ "hbase.hregion.max.filesize": "10737418240",
+ "hfile.block.cache.size": "0.40",
+ "hbase.security.authentication": "simple",
+ "hbase.defaults.for.version.skip": "true",
+ "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org",
+ "zookeeper.znode.parent": "/hbase-unsecure",
+ "hbase.hstore.blockingStoreFiles": "10",
+ "hbase.master.port": "60000",
+ "hbase.hregion.majorcompaction": "86400000",
+ "hbase.security.authorization": "false",
+ "hbase.cluster.distributed": "true",
+ "hbase.hregion.memstore.mslab.enabled": "true",
+ "hbase.client.scanner.caching": "100",
+ "hbase.zookeeper.useMulti": "true"
+ },
+ "core-site": {
+ "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+ "gluster.daemon.user": "null",
+ "hadoop.proxyuser.oozie.groups": "users",
+ "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org",
+ "hadoop.proxyuser.hive.groups": "users",
+ "hadoop.security.authentication": "simple",
+ "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org",
+ "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+ "mapreduce.jobtracker.webinterface.trusted": "false",
+ "fs.AbstractFileSystem.glusterfs.impl": "null",
+ "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+ "fs.trash.interval": "360",
+ "ipc.client.idlethreshold": "8000",
+ "io.file.buffer.size": "131072",
+ "hadoop.security.authorization": "false",
+ "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org",
+ "hadoop.security.auth_to_local": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT",
+ "hadoop.proxyuser.hcat.groups": "users",
+ "ipc.client.connection.maxidletime": "30000",
+ "ipc.client.connect.max.retries": "50"
+ },
+ "hive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "false",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.server2.authentication": "NOSASL",
+ "hive.server2.transport.mode": "binary",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hive-interactive-site": {
+ "hive.enforce.sorting": "true",
+ "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+ "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true",
+ "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "fs.file.impl.disable.cache": "true",
+ "hive.auto.convert.join.noconditionaltask": "true",
+ "hive.map.aggr": "true",
+ "hive.optimize.index.filter": "true",
+ "hive.security.authorization.enabled": "false",
+ "hive.optimize.reducededuplication.min.reducer": "1",
+ "hive.optimize.bucketmapjoin": "true",
+ "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9084",
+ "hive.mapjoin.bucket.cache.size": "10000",
+ "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+ "hive.vectorized.execution.enabled": "false",
+ "javax.jdo.option.ConnectionUserName": "hive",
+ "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+ "hive.optimize.reducededuplication": "true",
+ "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
+ "hive.metastore.client.socket.timeout": "60",
+ "hive.auto.convert.join": "true",
+ "hive.enforce.bucketing": "true",
+ "hive.mapred.reduce.tasks.speculative.execution": "false",
+ "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+ "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+ "hive.auto.convert.sortmerge.join": "true",
+ "fs.hdfs.impl.disable.cache": "true",
+ "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+ "ambari.hive.db.schema.name": "hive",
+ "hive.metastore.execute.setugi": "true",
+ "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
+ "hive.server2.enable.doAs": "true",
+ "hive.server2.authentication": "NOSASL",
+ "hive.server2.transport.mode": "binary",
+ "hive.optimize.mapjoin.mapreduce": "true",
+ "hive.exec.scratchdir" : "/custompath/tmp/hive"
+ },
+ "hiveserver2-site": {
+ "hive.metastore.metrics.enabled": "true",
+ "hive.service.metrics.reporter": "HADOOP2"
+ },
+ "ranger-hive-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "ranger-hive-plugin-enabled": "No",
+ "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
+ "REPOSITORY_CONFIG_USERNAME": "hive",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hive",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-knox-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "KNOX_HOME": "/usr/hdp/current/knox-server",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "ranger-knox-plugin-enabled": "No",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "admin",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "admin-password",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "yarn-site": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+ "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/hadoop/yarn/local1",
+ "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025",
+ "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+ "yarn.resourcemanager.hostname": "c6402.ambari.apache.org",
+ "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+ "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+ "yarn.nodemanager.resource.memory-mb": "2048",
+ "yarn.scheduler.minimum-allocation-mb": "683",
+ "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050",
+ "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030",
+ "yarn.log-aggregation.retain-seconds": "2592000",
+ "yarn.scheduler.maximum-allocation-mb": "2048",
+ "yarn.log-aggregation-enable": "true",
+ "yarn.nodemanager.address": "0.0.0.0:45454",
+ "yarn.nodemanager.container-monitor.interval-ms": "3000",
+ "yarn.nodemanager.log-aggregation.compression-type": "gz",
+ "yarn.nodemanager.log.retain-seconds": "604800",
+ "yarn.nodemanager.delete.debug-delay-sec": "0",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/hadoop/yarn/log1",
+ "yarn.nodemanager.health-checker.interval-ms": "135000",
+ "yarn.resourcemanager.am.max-attempts": "2",
+ "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+ "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+ "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+ "yarn.nodemanager.vmem-check-enabled": "false",
+ "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+ "yarn.admin.acl": "*",
+ "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088",
+ "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
+ "yarn.resourcemanager.nodes.include-path": "/etc/hadoop/conf_for_include/yarn.include",
+ "yarn.nodemanager.linux-container-executor.group": "hadoop",
+ "yarn.acl.enable": "true",
+ "manage.include.files": "true",
+ "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs",
+ "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+ "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141",
+ "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
+ "yarn.http.policy": "HTTP_ONLY",
+ "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090"
+ },
+ "ranger-yarn-plugin-properties": {
+ "ranger-yarn-plugin-enabled": "No"
+ },
+ "tez-site": {
+ "tez.am.log.level": "WARN",
+ "tez.lib.uris": "hdfs:///apps/tez/,hdfs:///apps/tez/lib/",
+ "tez.staging-dir": "/tmp/${user.name}/staging",
+ "tez.am.am-rm.heartbeat.interval-ms.max": "250"
+ },
+ "slider-env": {
+ "content": "export JAVA_HOME={{java64_home}}\nexport HADOOP_CONF_DIR={{hadoop_conf_dir}}"
+ },
+ "yarn-env": {
+ "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+ "apptimelineserver_heapsize": "1024",
+ "nodemanager_heapsize": "1024",
+ "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n #echo \"run java in $JAVA_HOME\"\n JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n echo \"Error: JAVA_HOME is not set.\"\n exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n JAVA_HEAP_MAX=\"-
Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to specif
y an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be appen
ded to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${YARN_
ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
+ "yarn_heapsize": "1024",
+ "yarn_user": "yarn",
+ "resourcemanager_heapsize": "1024",
+ "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+ "min_user_id": "1000",
+ "is_supported_yarn_ranger": "false",
+ "service_check.queue.name": "default"
+ },
+ "hadoop-env": {
+ "hdfs_tmp_dir": "/tmp",
+ "namenode_opt_maxnewsize": "200m",
+ "hdfs_log_dir_prefix": "/var/log/hadoop",
+ "namenode_heapsize": "1024m",
+ "namenode_opt_newsize": "200m",
+ "namenode_opt_permsize" : "128m",
+ "namenode_opt_maxpermsize" : "256m",
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nex
port HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USE
R/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/
gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SE
CURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The s
cheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "hdfs_user": "hdfs",
+ "dtnode_heapsize": "1024m",
+ "proxyuser_group": "users",
+ "hadoop_heapsize": "1024",
+ "hadoop_pid_dir_prefix": "/var/run/hadoop"
+ },
+ "hive-env": {
+ "hcat_pid_dir": "/var/run/webhcat",
+ "hcat_user": "hcat",
+ "hive_ambari_database": "MySQL",
+ "hive_hostname": "abtest-3.c.pramod-thangali.internal",
+ "hive_metastore_port": "9083",
+ "webhcat_user": "hcat",
+ "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can
be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}",
+ "hive_database_name": "hive",
+ "hive_database_type": "mysql",
+ "hive_pid_dir": "/var/run/hive",
+ "hive_log_dir": "/var/log/hive",
+ "hive_user": "hive",
+ "hcat_log_dir": "/var/log/webhcat",
+ "hive_database": "New MySQL Database",
+ "hive_security_authorization": "None"
+ },
+ "ranger-env": {
+ "xml_configurations_supported" : "false"
+ },
+ "cluster-env": {
+ "managed_hdfs_resource_property_names": "",
+ "security_enabled": "false",
+ "hdfs_user_principal" : "",
+ "hdfs_user_keytab" : "",
+ "ignore_groupsusers_create": "false",
+ "smokeuser": "ambari-qa",
+ "kerberos_domain": "EXAMPLE.COM",
+ "user_group": "hadoop",
+ "repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0",
+ "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}",
+ "metrics_collector_external_hosts": "c6402.ambari.apache.org",
+ "metrics_collector_external_port": "6189",
+ "override_uid" : "true",
+ "fetch_nonlocal_groups": "true",
+ "manage_dirs_on_root": "true",
+ "ignore_bad_mounts": "false"
+ },
+ "hbase-env": {
+ "hbase_pid_dir": "/var/run/hbase",
+ "hbase_user": "hbase",
+ "hbase_master_heapsize": "1024m",
+ "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateS
tamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra
ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBAS
E_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}",
+ "hbase_regionserver_heapsize": "1024m",
+ "hbase_regionserver_xmn_max": "512",
+ "hbase_regionserver_xmn_ratio": "0.2",
+ "hbase_log_dir": "/var/log/hbase",
+ "hbase_java_io_tmpdir" : "/tmp",
+ "hbase_regionserver_shutdown_timeout": "30"
+ },
+ "ganglia-env": {
+ "gmond_user": "nobody",
+ "ganglia_runtime_dir": "/var/run/ganglia/hdp",
+ "rrdcached_base_dir": "/var/lib/ganglia/rrds",
+ "rrdcached_flush_timeout": "7200",
+ "gmetad_user": "nobody",
+ "rrdcached_write_threads": "4",
+ "rrdcached_delay": "1800",
+ "rrdcached_timeout": "3600"
+ },
+ "zookeeper-env": {
+ "zk_user": "zookeeper",
+ "zk_log_dir": "/var/log/zookeeper",
+ "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
+ "zk_pid_dir": "/var/run/zookeeper",
+ "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM",
+ "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab"
+ },
+ "zoo.cfg": {
+ "clientPort": "2181",
+ "syncLimit": "5",
+ "initLimit": "10",
+ "dataDir": "/hadoop/zookeeper",
+ "tickTime": "2000"
+ },
+ "mapred-env": {
+ "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.",
+ "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
+ "mapred_user": "mapred",
+ "jobhistory_heapsize": "900",
+ "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+ },
+ "tez-env": {
+ "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}",
+ "tez_user": "tez"
+ },
+ "flume-env": {
+ "content": "export JAVA_HOME={{java64_home}}\nexport HIVE_HOME={{flume_hive_home}}",
+ "flume_user": "flume",
+ "flume_log_dir": "/var/log/flume",
+ "flume_run_dir": "/var/run/flume"
+ },
+ "storm-env": {
+ "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\n# export STORM_CONF_DIR=\"\"",
+ "storm_log_dir": "/var/log/storm",
+ "storm_pid_dir": "/var/run/storm",
+ "storm_user": "storm"
+ },
+ "falcon-env": {
+ "falcon_apps_hdfs_dir": "/apps/falcon",
+ "falcon_port": "15000",
+ "falcon_pid_dir": "/var/run/falcon",
+ "falcon_log_dir": "/var/log/falcon",
+ "falcon.emeddedmq.port": "61616",
+ "falcon_user": "falcon",
+ "falcon_local_dir": "/hadoop/falcon",
+ "content": "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\n#export FALCON_OPTS=\n\n# any additional java opts that you want to set for client only\n#export FALCON_CLIENT_OPTS=\n\n# java heap size we want to set for the client. Default is 1024MB\n#export FALCON_CLIENT_HEAP=\n\n# any additional opts you want to set for prisim service.\n#export FALCON_PRISM_OPTS=\n\n# java heap size we want to set for the prisim service. Default is 1024MB\n#export FALCON_PRISM_HEAP=\n\n# any additional opts you want to set for falcon service.\nexport FALCON_SERVER_OPTS=\"-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}\"\n\n# java heap size we want to set for the falcon server. Default is 1024MB\n#export FALCON_SERVER_HEAP=\n\n# What is is considered as falcon home
dir. Default is the base locaion of the installed software\n#export FALCON_HOME_DIR=\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport FALCON_LOG_DIR={{falcon_log_dir}}\n\n# Where pid files are stored. Defatult is logs directory under the base install location\nexport FALCON_PID_DIR={{falcon_pid_dir}}\n\n# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location\nexport FALCON_DATA_DIR={{falcon_embeddedmq_data}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\n#export FALCON_EXPANDED_WEBAPP_DIR=",
+ "falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data",
+ "falcon.embeddedmq": "true",
+ "falcon_store_uri": "file:///hadoop/falcon/store",
+ "supports_hive_dr": "true"
+ },
+ "oozie-env": {
+ "oozie_derby_database": "Derby",
+ "oozie_admin_port": "11001",
+ "oozie_hostname": "abtest-3.c.pramod-thangali.internal",
+ "oozie_pid_dir": "/var/run/oozie",
+ "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuratio
n directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64",
+ "oozie_user": "oozie",
+ "oozie_admin_users": "{oozie_user}, {oozie_user}-admin",
+ "oozie_database": "New Derby Database",
+ "oozie_data_dir": "/hadoop/oozie/data",
+ "oozie_log_dir": "/var/log/oozie",
+ "oozie_tmp_dir": "/var/tmp/oozie"
+ },
+ "webhcat-env": {
+ "content": "\n# The file containing the running pid\nPID_FILE={{pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=/usr/lib/hadoop"
+ },
+ "hcat-env": {
+ "content": "JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}"
+ },
+ "pig-env": {
+ "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+ },
+ "sqoop-env": {
+ "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
+ "sqoop_user": "sqoop"
+ },
+ "hdfs-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "yarn-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hbase-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "hive-exec-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "webhcat-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "zookeeper-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "pig-properties": {
+ "content": "pigproperties\nline2"
+ },
+ "oozie-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "flume-conf": {
+ "content": "a1.sources = r1\n a1.sinks = k1\n a1.channels = c1\n # Describe/configure the source\n a1.sources.r1.type = netcat\n a1.sources.r1.bind = localhost\n a1.sources.r1.port = 44444\n \n # Describe the sink\n a1.sinks.k1.type = logger\n \n # Use a channel which buffers events in memory\n a1.channels.c1.type = memory\n a1.channels.c1.capacity = 1000\n a1.channels.c1.transactionCapacity = 100\n \n # Bind the source and sink to the channel\n a1.sources.r1.channels = c1\n a1.sinks.k1.channel = c1\n"
+ },
+ "flume-log4j": {
+ "content": "log4jproperties\nline2"
+ },
+ "ranger-hbase-plugin-properties": {
+ "POLICY_MGR_URL": "{{policymgr_mgr_url}}",
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "ranger-hbase-plugin-enabled": "No",
+ "REPOSITORY_NAME": "{{repo_name}}",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}",
+ "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}",
+ "policy_user": "ambari-qa",
+ "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.DB.PASSWORD": "{{xa_audit_db_password}}",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hbase",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.DB.FLAVOUR": "{{xa_audit_db_flavor}}",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hbase",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ranger-hdfs-plugin-properties": {
+ "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+ "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+ "common.name.for.certificate": "-",
+ "XAAUDIT.HDFS.IS_ENABLED": "false",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+ "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+ "XAAUDIT.DB.IS_ENABLED": "true",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+ "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+ "XAAUDIT.SOLR.IS_ENABLED": "false",
+ "hadoop.rpc.protection": "-",
+ "ranger-hdfs-plugin-enabled": "No",
+ "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+ "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+ "policy_user": "ambari-qa",
+ "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+ "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+ "SSL_TRUSTSTORE_PASSWORD": "changeit",
+ "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+ "REPOSITORY_CONFIG_USERNAME": "hadoop",
+ "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+ "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+ "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+ "REPOSITORY_CONFIG_PASSWORD": "hadoop",
+ "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+ },
+ "ams-hbase-env": {
+ "hbase_pid_dir": "/var/run/ambari-metrics-collector/",
+ "regionserver_xmn_size": "256m",
+ "max_open_files_limit": "32768",
+ "hbase_master_maxperm_size": "128m",
+ "hbase_regionserver_xmn_ratio": "0.2",
+ "hbase_master_heapsize": "512m",
+ "hbase_regionserver_heapsize": "512m",
+ "hbase_log_dir": "/var/log/ambari-metrics-collector",
+ "hbase_master_xmn_size": "256m",
+ "content": "\n",
+ "hbase_regionserver_shutdown_timeout": "30"
+ },
+ "ams-log4j": {
+ "content": "\n"
+ },
+ "ams-hbase-site": {
+ "hbase.master.info.bindAddress": "0.0.0.0",
+ "hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper",
+ "hbase.master.wait.on.regionservers.mintostart": "1",
+ "hbase.replication": "false",
+ "hbase.regionserver.global.memstore.lowerLimit": "0.3",
+ "hbase.hregion.memstore.block.multiplier": "4",
+ "hbase.hregion.memstore.flush.size": "134217728",
+ "hbase.rootdir": "hdfs://localhost:8020/apps/hbase/data",
+ "hbase.zookeeper.property.clientPort": "61181",
+ "phoenix.spool.directory": "${hbase.tmp.dir}/phoenix-spool",
+ "hbase.client.scanner.timeout.period": "900000",
+ "phoenix.groupby.maxCacheSize": "307200000",
+ "hbase.snapshot.enabled": "false",
+ "hbase.regionserver.global.memstore.upperLimit": "0.35",
+ "hbase_master_xmn_size": "128m",
+ "phoenix.query.spoolThresholdBytes": "12582912",
+ "zookeeper.session.timeout": "120000",
+ "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp",
+ "hfile.block.cache.size": "0.3",
+ "hbase.regionserver.port": "61320",
+ "hbase.regionserver.thread.compaction.small": "3",
+ "hbase.master.info.port": "61310",
+ "hbase.hregion.majorcompaction": "0",
+ "phoenix.query.maxGlobalMemoryPercentage": "15",
+ "hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}",
+ "hbase.regionserver.info.port": "61330",
+ "hbase.hstore.blockingStoreFiles": "200",
+ "hbase.master.port": "61300",
+ "hbase.zookeeper.leaderport": "61388",
+ "hbase.regionserver.thread.compaction.large": "2",
+ "phoenix.query.timeoutMs": "1200000",
+ "hbase.local.dir": "${hbase.tmp.dir}/local",
+ "hbase.cluster.distributed": "true",
+ "zookeeper.session.timeout.localHBaseCluster": "20000",
+ "hbase.client.scanner.caching": "10000",
+ "phoenix.sequence.saltBuckets": "2",
+ "hbase.hstore.flusher.count": "2",
+ "hbase.zookeeper.peerport": "61288"
+ },
+ "ams-env": {
+ "ambari_metrics_user": "ams",
+ "metrics_monitor_log_dir": "/var/log/ambari-metrics-monitor",
+ "metrics_collector_log_dir": "/var/log/ambari-metrics-collector",
+ "metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor",
+ "content": "\n",
+ "metrics_collector_pid_dir": "/var/run/ambari-metrics-collector",
+ "metrics_collector_heapsize": "512m"
+ },
+ "ams-hbase-policy": {
+ "security.masterregion.protocol.acl": "*",
+ "security.admin.protocol.acl": "*",
+ "security.client.protocol.acl": "*"
+ },
+ "ams-hbase-log4j": {
+ "content": "\n"
+ },
+ "ams-site": {
+ "timeline.metrics.service.http.policy": "HTTPS_ONLY",
+ "timeline.metrics.host.aggregator.minute.ttl": "604800",
+ "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "1",
+ "timeline.metrics.cluster.aggregator.daily.ttl": "63072000",
+ "timeline.metrics.cluster.aggregator.minute.timeslice.interval": "30",
+ "timeline.metrics.service.resultset.fetchSize": "2000",
+ "timeline.metrics.service.checkpointDelay": "60",
+ "timeline.metrics.host.aggregator.hourly.disabled": "false",
+ "timeline.metrics.cluster.aggregator.daily.interval": "86400",
+ "timeline.metrics.cluster.aggregator.hourly.ttl": "31536000",
+ "timeline.metrics.host.aggregator.daily.disabled": "false",
+ "timeline.metrics.hbase.compression.scheme": "SNAPPY",
+ "timeline.metrics.cluster.aggregator.hourly.interval": "3600",
+ "phoenix.spool.directory": "/tmp",
+ "timeline.metrics.host.aggregator.ttl": "86400",
+ "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.service.cluster.aggregator.appIds": "datanode,nodemanager,hbase",
+ "timeline.metrics.service.webapp.address": "0.0.0.0:6188",
+ "timeline.metrics.cluster.aggregator.hourly.disabled": "false",
+ "timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint",
+ "timeline.metrics.hbase.data.block.encoding": "FAST_DIFF",
+ "timeline.metrics.cluster.aggregator.minute.ttl": "2592000",
+ "timeline.metrics.host.aggregator.minute.disabled": "false",
+ "phoenix.query.maxGlobalMemoryPercentage": "25",
+ "timeline.metrics.service.operation.mode": "distributed",
+ "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2",
+ "timeline.metrics.cluster.aggregator.daily.disabled": "false",
+ "timeline.metrics.service.rpc.address": "0.0.0.0:60200",
+ "timeline.metrics.cluster.aggregator.minute.disabled": "false",
+ "timeline.metrics.host.aggregator.hourly.ttl": "2592000",
+ "timeline.metrics.host.aggregator.minute.interval": "120",
+ "timeline.metrics.service.default.result.limit": "5760",
+ "timeline.metrics.host.aggregator.daily.ttl": "31536000",
+ "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier": "1",
+ "timeline.metrics.daily.aggregator.minute.interval": "86400",
+ "timeline.metrics.cluster.aggregator.minute.interval": "120",
+ "timeline.metrics.host.aggregator.hourly.interval": "3600"
+ },
+ "ams-grafana-env": {
+ "metrics_grafana_log_dir": "/var/log/ambari-metrics-grafana",
+ "metrics_grafana_pid_dir": "/var/run/ambari-metrics-grafana",
+ "metrics_grafana_data_dir": "/var/lib/ambari-metrics-grafana",
+ "content": "\n",
+ "metrics_grafana_username" : "admin",
+ "metrics_grafana_password" : "admin"
+ },
+ "ams-ssl-server": {
+ "content": "\n"
+ },
+ "ams-ssl-client": {
+ "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
+ "ssl.client.truststore.type": "jks",
+ "ssl.client.truststore.password": "bigdata"
+ },
+ "ams-grafana-ini": {
+ "content": "\n"
+ },
+ "hadoop-metrics2.properties": {
+ "content": "# Licensed to the Apache Software Foundation (ASF) under one or more\r\n# contributor license agreements. See the NOTICE file distributed with\r\n# this work for additional information regarding copyright ownership.\r\n# The ASF licenses this file to You under the Apache License, Version 2.0\r\n# (the \"License\"); you may not use this file except in compliance with\r\n# the License. You may obtain a copy of the License at\r\n#\r\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# syntax: [prefix].[source|sink|jmx].[instance].[options]\r\n# See package.html for org.apache.hadoop.metrics2 for details\r\n\r\n{% if has_ganglia_se
rver %}\r\n*.period=60\r\n\r\n*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31\r\n*.sink.ganglia.period=10\r\n\r\n# default for supportsparse is false\r\n*.sink.ganglia.supportsparse=true\r\n\r\n.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both\r\n.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40\r\n\r\n# Hook up to the server\r\nnamenode.sink.ganglia.servers={{ganglia_server_host}}:8661\r\ndatanode.sink.ganglia.servers={{ganglia_server_host}}:8659\r\njobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662\r\ntasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658\r\nmaptask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nreducetask.sink.ganglia.servers={{ganglia_server_host}}:8660\r\nresourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664\r\nnodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657\r\nhistoryserver.sink.ganglia.servers={{ganglia_server_host}}:8666\r\njournaln
ode.sink.ganglia.servers={{ganglia_server_host}}:8654\r\nnimbus.sink.ganglia.servers={{ganglia_server_host}}:8649\r\nsupervisor.sink.ganglia.servers={{ganglia_server_host}}:8650\r\n\r\nresourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue\r\n\r\n{% endif %}\r\n\r\n{% if has_metric_collector %}\r\n\r\n*.period={{metrics_collection_period}}\r\n*.sink.timeline.plugin.urls=file:\/\/\/usr\/lib\/ambari-metrics-hadoop-sink\/ambari-metrics-hadoop-sink.jar\r\n*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink\r\n*.sink.timeline.period={{metrics_collection_period}}\r\n*.sink.timeline.sendInterval={{metrics_report_interval}}000\r\n*.sink.timeline.slave.host.name={{hostname}}\r\n*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}\r\n*.sink.timeline.protocol={{metric_collector_protocol}}\r\n*.sink.timeline.port={{metric_collector_port}}\r\n\r\n# HTTPS properties\r\n*.sink.timeline.truststore.path = {{metric_truststore_path}}\r\n*.sink.timeline.truststore.t
ype = {{metric_truststore_type}}\r\n*.sink.timeline.truststore.password = {{metric_truststore_password}}\r\n\r\ndatanode.sink.timeline.collector={{metric_collector_hosts}}\r\nnamenode.sink.timeline.collector={{metric_collector_hosts}}\r\nresourcemanager.sink.timeline.collector={{metric_collector_hosts}}\r\nnodemanager.sink.timeline.collector={{metric_collector_hosts}}\r\njobhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\njournalnode.sink.timeline.collector={{metric_collector_hosts}}\r\nmaptask.sink.timeline.collector={{metric_collector_hosts}}\r\nreducetask.sink.timeline.collector={{metric_collector_hosts}}\r\napplicationhistoryserver.sink.timeline.collector={{metric_collector_hosts}}\r\n\r\nresourcemanager.sink.timeline.tagsForPrefix.yarn=Queue\r\n\r\n{% if is_nn_client_port_configured %}\r\n# Namenode rpc ports customization\r\nnamenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}\r\n{% endif %}\r\n{% if is_nn_dn_port_configured %}\r\nnamenode.si
nk.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}\r\n{% endif %}\r\n{% if is_nn_healthcheck_port_configured %}\r\nnamenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}\r\n{% endif %}\r\n\r\n{% endif %}"
+ }
+ },
+ "configuration_attributes": {
+ "ams-hbase-env": {},
+ "ams-hbase-security-site": {},
+ "ams-log4j": {},
+ "ams-hbase-site": {},
+ "ams-hbase-policy": {},
+ "ams-hbase-log4j": {},
+ "ams-site": {},
+ "ams-ssl-server": {},
+ "ams-ssl-client": {},
+ "sqoop-site": {},
+ "yarn-site": {
+ "final": {
+ "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
+ "yarn.nodemanager.container-executor.class": "true",
+ "yarn.nodemanager.local-dirs": "true"
+ }
+ },
+ "tez-site": {
+ "final": {
+ "tez.am.log.level": "true"
+ }
+ },
+ "capacity-scheduler": {
+ "final": {
+ "yarn.scheduler.capacity.node-locality-delay": "true"
+ }
+ },
+ "mapred-site": {
+ "final": {
+ "mapred.healthChecker.script.path": "true",
+ "mapreduce.jobtracker.staging.root.dir": "true"
+ }
+ },
+ "oozie-site": {
+ "final": {
+ "oozie.service.PurgeService.purge.interval": "true",
+ "oozie.service.CallableQueueService.queue.size": "true"
+ }
+ },
+ "webhcat-site": {
+ "final": {
+ "templeton.pig.path": "true",
+ "templeton.exec.timeout": "true",
+ "templeton.override.enabled": "true"
+ }
+ },
+ "hdfs-site": {
+ "final": {
+ "dfs.web.ugi": "true",
+ "dfs.support.append": "true",
+ "dfs.cluster.administrators": "true"
+ }
+ },
+ "hbase-site": {
+ "final": {
+ "hbase.client.keyvalue.maxsize": "true",
+ "hbase.hstore.compactionThreshold": "true",
+ "hbase.rootdir": "true"
+ }
+ },
+ "core-site": {
+ "final": {
+ "hadoop.proxyuser.hive.groups": "true",
+ "webinterface.private.actions": "true",
+ "hadoop.proxyuser.oozie.hosts": "true"
+ }
+ },
+ "hive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hiveserver2-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ },
+ "hive-interactive-site": {
+ "final": {
+ "javax.jdo.option.ConnectionPassword": "true",
+ "javax.jdo.option.ConnectionDriverName": "true",
+ "hive.optimize.bucketmapjoin.sortedmerge": "true"
+ }
+ }
+ },
+ "configurationTags": {
+ "ams-hbase-env": {
+ "tag": "version1"
+ },
+ "ams-hbase-security-site": {
+ "tag": "version1"
+ },
+ "ams-hbase-site": {
+ "tag": "version1"
+ },
+ "ams-env": {
+ "tag": "version1"
+ },
+ "ams-site": {
+ "tag": "version1"
+ },
+ "ams-ssl-server": {
+ "tag": "version1"
+ },
+ "ams-ssl-client": {
+ "tag": "version1"
+ },
+ "ams-hbase-policy": {
+ "tag": "version1"
+ },
+ "ams-log4j": {
+ "tag": "version1"
+ },
+ "ams-hbase-log4j": {
+ "tag": "version1"
+ },
+ "capacity-scheduler": {
+ "tag": "version1"
+ },
+ "oozie-site": {
+ "tag": "version1"
+ },
+ "storm-site": {
+ "tag": "version1"
+ },
+ "webhcat-site": {
+ "tag": "version1"
+ },
+ "global": {
+ "tag": "version1"
+ },
+ "mapred-site": {
+ "tag": "version1"
+ },
+ "hdfs-site": {
+ "tag": "version1"
+ },
+ "hbase-site": {
+ "tag": "version1"
+ },
+ "core-site": {
+ "tag": "version1"
+ },
+ "yarn-site": {
+ "tag": "version1"
+ },
+ "hive-site": {
+ "tag": "version1"
+ },
+ "hive-interactive-site": {
+ "tag": "version1"
+ },
+ "hiveserver2-site": {
+ "tag": "version1"
+ },
+ "hdfs-log4j": {
+ "tag": "version1"
+ },
+ "yarn-log4j": {
+ "tag": "version1"
+ },
+ "hbase-log4j": {
+ "tag": "version1"
+ },
+ "hive-log4j": {
+ "tag": "version1"
+ },
+ "hive-exec-log4j": {
+ "tag": "version1"
+ },
+ "zookeeper-log4j": {
+ "tag": "version1"
+ },
+ "oozie-log4j": {
+ "tag": "version1"
+ },
+ "pig-log4j": {
+ "tag": "version1"
+ },
+ "pig-properties": {
+ "tag": "version1"
+ }
+ },
+ "commandId": "7-1",
+ "clusterHostInfo": {
+ "ambari_server_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "snamenode_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nm_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "drpc_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "slave_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_server_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "hive_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "logviewer_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_metastore_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "hbase_rs_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "webhcat_server_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "zookeeper_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "supervisor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "ganglia_monitor_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_ping_ports": [
+ "8670",
+ "8670"
+ ],
+ "rm_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "all_hosts": [
+ "c6401.ambari.apache.org",
+ "c6402.ambari.apache.org"
+ ],
+ "all_racks": [
+ "/default/rack_01",
+ "/default/rack_02"
+ ],
+ "all_ipv4_ips": [
+ "127.0.0.1",
+ "127.0.0.1"
+ ],
+ "storm_ui_server_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "oozie_server": [
+ "c6402.ambari.apache.org"
+ ],
+ "hs_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "nimbus_hosts": [
+ "c6401.ambari.apache.org"
+ ],
+ "namenode_host": [
+ "c6401.ambari.apache.org"
+ ],
+ "hbase_master_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "hive_mysql_host": [
+ "c6402.ambari.apache.org"
+ ],
+ "falcon_server_hosts": [
+ "c6402.ambari.apache.org"
+ ],
+ "metrics_collector_hosts": [
+ "c6401.ambari.apache.org"
+ ]
+ }
+}
[22/50] [abbrv] ambari git commit: AMBARI-21750. HDFS restart fails
after migration if WebHDFS is disabled. (swagle)
Posted by ja...@apache.org.
AMBARI-21750. HDFS restart fails after migration if WebHDFS is disabled. (swagle)
(cherry picked from commit 57d33d23876b7a937bfafd15fbab47ad328d0cc3)
Change-Id: Idcfe2bfd0e95edf2712b94c87115837b0166c04e
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0648e12
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0648e12
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0648e12
Branch: refs/heads/2.5-maint
Commit: c0648e12fb8c2b97fd38fa1845322887995cf8b9
Parents: 0c97625
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Thu Aug 17 17:41:03 2017 -0700
Committer: swagle@hortonworks.com <je...@hortonworks.com>
Committed: Fri Aug 18 01:06:10 2017 +0000
----------------------------------------------------------------------
ambari-server/src/main/assemblies/server.xml | 1 -
1 file changed, 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/c0648e12/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/assemblies/server.xml b/ambari-server/src/main/assemblies/server.xml
index 395f322..403e21c 100644
--- a/ambari-server/src/main/assemblies/server.xml
+++ b/ambari-server/src/main/assemblies/server.xml
@@ -195,7 +195,6 @@
<outputDirectory>/var/lib/ambari-server/resources/stacks/${supplemental.distribution}</outputDirectory>
<excludes>
<exclude>/4.2/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
- <exclude>/4.2.5/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
</excludes>
</fileSet>
<fileSet>
[43/50] [abbrv] ambari git commit: AMBARI-21807. 'Move Hive
Metastore' wizard is stuck while configuring MYSQL_SERVER. (jaimin)
Posted by ja...@apache.org.
AMBARI-21807. 'Move Hive Metastore' wizard is stuck while configuring MYSQL_SERVER. (jaimin)
Change-Id: I2176cecabdaeee3e23d9f5a5302e79a296edc17c
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/859673a7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/859673a7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/859673a7
Branch: refs/heads/2.5-maint
Commit: 859673a77055b3315d763535b2c9e2747602d7c5
Parents: 4b71161
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Thu Aug 24 13:18:58 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Thu Aug 24 13:18:58 2017 -0700
----------------------------------------------------------------------
.../main/service/reassign/step1_controller.js | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/859673a7/ambari-web/app/controllers/main/service/reassign/step1_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign/step1_controller.js b/ambari-web/app/controllers/main/service/reassign/step1_controller.js
index c631e85..12d77d3 100644
--- a/ambari-web/app/controllers/main/service/reassign/step1_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign/step1_controller.js
@@ -93,8 +93,7 @@ App.ReassignMasterWizardStep1Controller = Em.Controller.extend({
databaseTypeMatch,
properties = {},
configs = {},
- dbPropertyMapItem = Em.getWithDefault(this.get('dbPropertyMap'), this.get('content.reassign.component_name'), null),
- serviceDbProp = this.get('content.reassign.service_id').toLowerCase() + '_database';
+ dbPropertyMapItem = Em.getWithDefault(this.get('dbPropertyMap'), this.get('content.reassign.component_name'), null);
data.items.forEach(function(item) {
configs[item.type] = item.properties;
@@ -118,8 +117,7 @@ App.ReassignMasterWizardStep1Controller = Em.Controller.extend({
App.router.reassignMasterController.set('content.hasManualSteps', false);
}
- properties['is_remote_db'] = /Existing/ig.test(properties[serviceDbProp]);
-
+ properties['is_remote_db'] = this.isExistingDb(configs);
properties['database_hostname'] = this.getDatabaseHost();
this.saveDatabaseType(databaseType);
@@ -145,6 +143,13 @@ App.ReassignMasterWizardStep1Controller = Em.Controller.extend({
}
},
+ isExistingDb: function(configs) {
+ var serviceName = this.get('content.reassign.service_id').toLowerCase();
+ var serviceDbSite = serviceName + '-env';
+ var serviceDbConfig = serviceName + '_database';
+ return /Existing/ig.test(configs[serviceDbSite][serviceDbConfig]);
+ },
+
getDatabaseHost: function() {
var db_type = this.get('databaseType'),
connectionURLProps = {
[29/50] [abbrv] ambari git commit: Revert "AMBARI-21675.Add Secure
Flag to Cookie / JSESSIONID in Zeppelin(Prabhjyot Singh via Venkata Sairam)"
Posted by ja...@apache.org.
Revert "AMBARI-21675.Add Secure Flag to Cookie / JSESSIONID in Zeppelin(Prabhjyot Singh via Venkata Sairam)"
This reverts commit 2b50a560c34a278b67352b699e698a1cfd050406.
(cherry picked from commit 4124bc30d021277efbd132123a99de9df7707c6e)
Change-Id: I6082d0a34e99904d5423f75ffc5c10fc0d1d0eed
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8e7ec78a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8e7ec78a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8e7ec78a
Branch: refs/heads/2.5-maint
Commit: 8e7ec78a4a0162ce009afb1059009f4ffa7cc2f0
Parents: 5a57ad3
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Mon Aug 21 13:07:34 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Mon Aug 21 13:49:56 2017 -0700
----------------------------------------------------------------------
.../ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml | 6 ------
1 file changed, 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/8e7ec78a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
index 742f5be..971e4af 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/configuration/zeppelin-shiro-ini.xml
@@ -61,12 +61,6 @@ sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager
cacheManager = org.apache.shiro.cache.MemoryConstrainedCacheManager
securityManager.cacheManager = $cacheManager
-cookie = org.apache.shiro.web.servlet.SimpleCookie
-cookie.name = JSESSIONID
-cookie.secure = true
-cookie.httpOnly = true
-sessionManager.sessionIdCookie = $cookie
-
securityManager.sessionManager = $sessionManager
# 86,400,000 milliseconds = 24 hour
securityManager.sessionManager.globalSessionTimeout = 86400000
[36/50] [abbrv] ambari git commit: AMBARI-21781. Error: Repositories
for os type redhat-ppc7 are not defined during package installation (ncole)
Posted by ja...@apache.org.
AMBARI-21781. Error: Repositories for os type redhat-ppc7 are not defined during package installation (ncole)
(cherry picked from commit 589d967638bc72ea1596e60bb0c1b358e888ea09)
Change-Id: If50799800da2634e287b2c858844472265517bf0
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0141992
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0141992
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0141992
Branch: refs/heads/2.5-maint
Commit: c01419924ede494db1daf1c6954bf9a9c8b3e2b3
Parents: bf66bf8
Author: Nate Cole <nc...@hortonworks.com>
Authored: Tue Aug 22 12:18:01 2017 -0400
Committer: Zuul <re...@hortonworks.com>
Committed: Tue Aug 22 14:13:53 2017 -0700
----------------------------------------------------------------------
.../ClusterStackVersionResourceProvider.java | 52 +----
...ClusterStackVersionResourceProviderTest.java | 210 +++++++++++++++++++
2 files changed, 214 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/c0141992/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index b489d7f..aba8b9c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -506,16 +506,15 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
final AmbariManagementController managementController = getManagementController();
final AmbariMetaInfo ami = managementController.getAmbariMetaInfo();
- String osFamily = getPowerPCOsFamily(hosts);
// build the list of OS repos
List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<>();
for (OperatingSystemEntity operatingSystem : operatingSystems) {
- String osType = getOsTypeForRepo(operatingSystem, osFamily);
- if (operatingSystem.isAmbariManagedRepos()) {
- perOsRepos.put(osType, operatingSystem.getRepositories());
+
+ if (operatingSystem.isAmbariManagedRepos()) {
+ perOsRepos.put(operatingSystem.getOsType(), operatingSystem.getRepositories());
} else {
- perOsRepos.put(osType, Collections.<RepositoryEntity> emptyList());
+ perOsRepos.put(operatingSystem.getOsType(), Collections.<RepositoryEntity> emptyList());
}
}
@@ -1089,47 +1088,4 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
amc.getAuthName(), serviceNote);
}
}
-
- /**
- * Check one host is enough to tell the arch
- * because all hosts should have the same arch.
- * @param hosts List<Host>
- * @return osFamily, null if hosts is empty or is X86_64
-
- */
- private String getPowerPCOsFamily(List<Host> hosts) {
- if (hosts.isEmpty()){
- return null;
- } else {
- Host host = hosts.get(0);
- String osFamily = host.getHostAttributes().get("os_family");
- if (null != osFamily && osFamily.endsWith("-ppc")){
- return osFamily;
- } else {
- return null;
- }
- }
- }
-
- /**
- * Use os type with -ppc post fix for powerpc
- * in order to have it consistent with the os information
- * stored in the Hosts table
- * No need to apply the change if os is x86_64
- * */
- private String getOsTypeForRepo(OperatingSystemEntity operatingSystem, String osFamily) {
- if (null != osFamily){
- String osType = operatingSystem.getOsType();
- int pos = osFamily.indexOf("-ppc");
- if (pos > 0){
- String os = osType.substring(0, pos);
- String majorVersion = osType.substring(os.length());
- return String.format("%s-ppc%s", os, majorVersion);
- } else {
- return operatingSystem.getOsType();
- }
- } else {
- return operatingSystem.getOsType();
- }
- }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c0141992/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 68596e0..5627756 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -117,6 +117,7 @@ import org.junit.Test;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
+import com.google.common.collect.ImmutableMap;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
@@ -1795,6 +1796,215 @@ public class ClusterStackVersionResourceProviderTest {
verify(managementController, response, clusters, cluster, hostVersionDAO);
}
+ @Test
+ public void testCreateResourcesPPC() throws Exception {
+ Resource.Type type = Resource.Type.ClusterStackVersion;
+
+ AmbariManagementController managementController = createMock(AmbariManagementController.class);
+ Clusters clusters = createNiceMock(Clusters.class);
+ Cluster cluster = createNiceMock(Cluster.class);
+ Map<String, String> hostLevelParams = new HashMap<>();
+ StackId stackId = new StackId("HDP", "2.0.1");
+
+ RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
+ repoVersion.setId(1l);
+
+ String os_json = "[\n" +
+ " {\n" +
+ " \"repositories\":[\n" +
+ " {\n" +
+ " \"Repositories/base_url\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos-ppc7/2.x/updates/2.2.0.0\",\n" +
+ " \"Repositories/repo_name\":\"HDP-UTILS\",\n" +
+ " \"Repositories/repo_id\":\"HDP-UTILS-1.1.0.20\"\n" +
+ " },\n" +
+ " {\n" +
+ " \"Repositories/base_url\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos-ppc7/2.x/updates/2.2.0.0\",\n" +
+ " \"Repositories/repo_name\":\"HDP\",\n" +
+ " \"Repositories/repo_id\":\"HDP-2.2\"\n" +
+ " }\n" +
+ " ],\n" +
+ " \"OperatingSystems/os_type\":\"redhat-ppc7\"\n" +
+ " }\n" +
+ "]";
+
+ repoVersion.setOperatingSystems(os_json);
+
+ Map<String, Host> hostsForCluster = new HashMap<>();
+ int hostCount = 2;
+ for (int i = 0; i < hostCount; i++) {
+ String hostname = "host" + i;
+ Host host = createNiceMock(hostname, Host.class);
+ expect(host.getHostName()).andReturn(hostname).anyTimes();
+ expect(host.getOsFamily()).andReturn("redhat-ppc7").anyTimes();
+ expect(host.getMaintenanceState(EasyMock.anyLong())).andReturn(
+ MaintenanceState.OFF).anyTimes();
+ expect(host.getAllHostVersions()).andReturn(
+ Collections.<HostVersionEntity>emptyList()).anyTimes();
+ expect(host.getHostAttributes()).andReturn(
+ ImmutableMap.<String, String>builder()
+ .put("os_family", "redhat-ppc")
+ .put("os_release_version", "7.2")
+ .build()
+ ).anyTimes();
+ replay(host);
+ hostsForCluster.put(hostname, host);
+ }
+
+ final ServiceComponentHost schDatanode = createMock(ServiceComponentHost.class);
+ expect(schDatanode.getServiceName()).andReturn("HDFS").anyTimes();
+ expect(schDatanode.getServiceComponentName()).andReturn("DATANODE").anyTimes();
+ final ServiceComponentHost schNamenode = createMock(ServiceComponentHost.class);
+ expect(schNamenode.getServiceName()).andReturn("HDFS").anyTimes();
+ expect(schNamenode.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+ final ServiceComponentHost schAMS = createMock(ServiceComponentHost.class);
+ expect(schAMS.getServiceName()).andReturn("AMBARI_METRICS").anyTimes();
+ expect(schAMS.getServiceComponentName()).andReturn("METRICS_COLLECTOR").anyTimes();
+ // First host contains versionable components
+ final List<ServiceComponentHost> schsH1 = new ArrayList<ServiceComponentHost>(){{
+ add(schDatanode);
+ add(schNamenode);
+ add(schAMS);
+ }};
+ // Second host does not contain versionable components
+ final List<ServiceComponentHost> schsH2 = new ArrayList<ServiceComponentHost>(){{
+ add(schAMS);
+ }};
+
+
+ ServiceOsSpecific.Package hdfsPackage = new ServiceOsSpecific.Package();
+ hdfsPackage.setName("hdfs");
+ List<ServiceOsSpecific.Package> packages = Collections.singletonList(hdfsPackage);
+
+ ActionManager actionManager = createNiceMock(ActionManager.class);
+
+ RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+ ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
+ ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
+
+ AbstractControllerResourceProvider.init(resourceProviderFactory);
+
+ Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
+ expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
+
+ expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+ expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+ expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+ expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
+ expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
+ expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
+ (Map<String, String>) anyObject(List.class), anyObject(String.class))).
+ andReturn(packages).anyTimes(); // 1 host has no versionable components, other hosts have 2 services
+// // that's why we don't send commands to it
+
+ expect(resourceProviderFactory.getHostResourceProvider(anyObject(Set.class), anyObject(Map.class),
+ eq(managementController))).andReturn(csvResourceProvider).anyTimes();
+
+ expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
+ expect(clusters.getHostsForCluster(anyObject(String.class))).andReturn(
+ hostsForCluster).anyTimes();
+
+ String clusterName = "Cluster100";
+ expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+ expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
+ expect(cluster.getServices()).andReturn(new HashMap<String, Service>()).anyTimes();
+ expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+ expect(cluster.getServiceComponentHosts(anyObject(String.class))).andAnswer(new IAnswer<List<ServiceComponentHost>>() {
+ @Override
+ public List<ServiceComponentHost> answer() throws Throwable {
+ String hostname = (String) EasyMock.getCurrentArguments()[0];
+ if (hostname.equals("host2")) {
+ return schsH2;
+ } else {
+ return schsH1;
+ }
+ }
+ }).anyTimes();
+
+ ExecutionCommand executionCommand = createNiceMock(ExecutionCommand.class);
+ ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
+
+ expect(executionCommandWrapper.getExecutionCommand()).andReturn(executionCommand).anyTimes();
+
+ Stage stage = createNiceMock(Stage.class);
+ expect(stage.getExecutionCommandWrapper(anyObject(String.class), anyObject(String.class))).
+ andReturn(executionCommandWrapper).anyTimes();
+
+ expect(executionCommand.getHostLevelParams()).andReturn(hostLevelParams).anyTimes();
+
+ Map<Role, Float> successFactors = new HashMap<>();
+ expect(stage.getSuccessFactors()).andReturn(successFactors).atLeastOnce();
+
+ // Check that we create proper stage count
+ expect(stageFactory.createNew(anyLong(), anyObject(String.class),
+ anyObject(String.class), anyLong(),
+ anyObject(String.class), anyObject(String.class),
+ anyObject(String.class))).andReturn(stage).
+ times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
+
+ expect(
+ repositoryVersionDAOMock.findByStackAndVersion(
+ anyObject(StackId.class),
+ anyObject(String.class))).andReturn(repoVersion);
+
+ expect(actionManager.getRequestTasks(anyLong())).andReturn(Collections.<HostRoleCommand>emptyList()).anyTimes();
+
+ ClusterEntity clusterEntity = new ClusterEntity();
+ clusterEntity.setClusterId(1l);
+ clusterEntity.setClusterName(clusterName);
+ ClusterVersionEntity cve = new ClusterVersionEntity(clusterEntity,
+ repoVersion, RepositoryVersionState.INSTALL_FAILED, 0, "");
+ expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class),
+ anyObject(StackId.class), anyObject(String.class))).andReturn(cve);
+
+ StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
+ StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+
+ expect(clusterVersionDAO.findByCluster(anyObject(String.class))).andReturn(Collections.<ClusterVersionEntity>emptyList()).once();
+
+ // replay
+ replay(managementController, response, clusters, resourceProviderFactory, csvResourceProvider,
+ cluster, repositoryVersionDAOMock, configHelper, schDatanode, schNamenode, schAMS, actionManager,
+ executionCommand, executionCommandWrapper,stage, stageFactory, clusterVersionDAO);
+
+ ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+ type,
+ PropertyHelper.getPropertyIds(type),
+ PropertyHelper.getKeyPropertyIds(type),
+ managementController);
+
+ injector.injectMembers(provider);
+
+ // add the property map to a set for the request. add more maps for multiple creates
+ Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
+
+ Map<String, Object> properties = new LinkedHashMap<>();
+
+ // add properties to the request map
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, "2.2.0.1-885");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, "HDP");
+ properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, "2.1.1");
+
+ propertySet.add(properties);
+
+ // create the request
+ Request request = PropertyHelper.getCreateRequest(propertySet, null);
+
+ SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createAdministrator());
+
+ RequestStatus status = provider.createResources(request);
+ Assert.assertNotNull(status);
+
+ // verify
+ verify(managementController, response, clusters, stageFactory, stage);
+
+ // check that the success factor was populated in the stage
+ Float successFactor = successFactors.get(Role.INSTALL_PACKAGES);
+ Assert.assertEquals(Float.valueOf(0.85f), successFactor);
+ }
+
+
+
private void testCreateResourcesExistingUpgrade(Authentication authentication) throws Exception {
Resource.Type type = Resource.Type.ClusterStackVersion;
[38/50] [abbrv] ambari git commit: AMBARI-21794. Post Ambari upgrade
on a migration cluster Oozie failed to restart when LZO compression is
enabled. (swagle)
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_linux.py
new file mode 100644
index 0000000..0e6179b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_linux.py
@@ -0,0 +1,387 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_port_from_url
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.get_architecture import get_architecture
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.stack_tools import get_stack_name
+from resource_management.libraries.functions.version import get_major_version
+
+from resource_management.core.utils import PasswordString
+from ambari_commons.credential_store_helper import get_password_from_credential_store
+from urlparse import urlparse
+
+import status_params
+import os
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+architecture = get_architecture()
+
+
+# Needed since this writes out the Atlas Hive Hook config file.
+cluster_name = config['clusterName']
+
+hostname = config["hostname"]
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = status_params.stack_name
+stack_name_uppercase = stack_name.upper()
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+stack_root = status_params.stack_root
+
+# The source stack will be present during a cross-stack upgrade.
+# E.g., BigInsights-4.2.5 or HDP-2.6
+source_stack = default("/commandParams/source_stack", None)
+# This variable name is important, do not change
+source_stack_name = get_stack_name(source_stack)
+
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted = status_params.stack_version_formatted
+major_stack_version = get_major_version(stack_version_formatted)
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+
+#hadoop params
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE,stack_version_formatted):
+ stack_version = None
+ upgrade_stack = stack_select._get_upgrade_stack()
+ if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
+ stack_version = upgrade_stack[1]
+
+ # oozie-server or oozie-client, depending on role
+ oozie_root = status_params.component_directory
+
+ # using the correct oozie root dir, format the correct location
+ oozie_lib_dir = format("{stack_root}/current/{oozie_root}")
+ oozie_setup_sh = format("{stack_root}/current/{oozie_root}/bin/oozie-setup.sh")
+ oozie_webapps_dir = format("{stack_root}/current/{oozie_root}/oozie-server/webapps")
+ oozie_webapps_conf_dir = format("{stack_root}/current/{oozie_root}/oozie-server/conf")
+ oozie_libext_dir = format("{stack_root}/current/{oozie_root}/libext")
+ oozie_server_dir = format("{stack_root}/current/{oozie_root}/oozie-server")
+ oozie_shared_lib = format("{stack_root}/current/{oozie_root}/share")
+ oozie_home = format("{stack_root}/current/{oozie_root}")
+ oozie_bin_dir = format("{stack_root}/current/{oozie_root}/bin")
+ oozie_examples_regex = format("{stack_root}/current/{oozie_root}/doc")
+
+ # set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that
+ # matches the version of oozie
+ falcon_home = format("{stack_root}/current/falcon-client")
+ if stack_version is not None:
+ falcon_home = '{0}/{1}/falcon'.format(stack_root, stack_version)
+
+ conf_dir = format("{stack_root}/current/{oozie_root}/conf")
+ hive_conf_dir = format("{conf_dir}/action-conf/hive")
+
+else:
+ oozie_lib_dir = "/var/lib/oozie"
+ oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
+ oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+ oozie_webapps_conf_dir = "/var/lib/oozie/oozie-server/conf"
+ oozie_libext_dir = "/usr/lib/oozie/libext"
+ oozie_server_dir = "/var/lib/oozie/oozie-server"
+ oozie_shared_lib = "/usr/lib/oozie/share"
+ oozie_home = "/usr/lib/oozie"
+ oozie_bin_dir = "/usr/bin"
+ falcon_home = '/usr/lib/falcon'
+ conf_dir = "/etc/oozie/conf"
+ hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
+ oozie_examples_regex = "/usr/share/doc/oozie-*"
+
+execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
+
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_hdfs_user_mode = 0770
+service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
+
+# This config actually contains {oozie_user}
+oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
+
+user_group = config['configurations']['cluster-env']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+oozie_tmp_dir = default("configurations/oozie-env/oozie_tmp_dir", "/var/tmp/oozie")
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+java_share_dir = "/usr/share/java"
+java64_home = config['hostLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
+
+# This variable name is important, do not change
+ext_js_file = "ext-2.2.zip"
+
+# During a cross-stack migration, the source location will be different
+# This variable name is important, do not change
+ext_js_path = format("/usr/share/{stack_name_uppercase}-oozie/{ext_js_file}")
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
+oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
+
+limits_conf_dir = "/etc/security/limits.d"
+
+oozie_user_nofile_limit = config['configurations']['oozie-env']['oozie_user_nofile_limit']
+oozie_user_nproc_limit = config['configurations']['oozie-env']['oozie_user_nproc_limit']
+
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
+oozie_site = config['configurations']['oozie-site']
+# Need this for yarn.nodemanager.recovery.dir in yarn-site
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_resourcemanager_address = config['configurations']['yarn-site']['yarn.resourcemanager.address']
+zk_namespace = default('/configurations/oozie-site/oozie.zookeeper.namespace', 'oozie')
+zk_connection_string = default('/configurations/oozie-site/oozie.zookeeper.connection.string', None)
+jaas_file = os.path.join(conf_dir, 'zkmigrator_jaas.conf')
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+
+credential_store_enabled = False
+if 'credentialStoreEnabled' in config:
+ credential_store_enabled = config['credentialStoreEnabled']
+
+if security_enabled:
+ oozie_site = dict(config['configurations']['oozie-site'])
+ oozie_principal_with_host = oozie_principal.replace('_HOST', hostname)
+
+ # If a user-supplied oozie.ha.authentication.kerberos.principal property exists in oozie-site,
+ # use it to replace the existing oozie.authentication.kerberos.principal value. This is to ensure
+ # that any special principal name needed for HA is used rather than the Ambari-generated value
+ if "oozie.ha.authentication.kerberos.principal" in oozie_site:
+ oozie_site['oozie.authentication.kerberos.principal'] = oozie_site['oozie.ha.authentication.kerberos.principal']
+ http_principal = oozie_site['oozie.authentication.kerberos.principal']
+
+ # If a user-supplied oozie.ha.authentication.kerberos.keytab property exists in oozie-site,
+ # use it to replace the existing oozie.authentication.kerberos.keytab value. This is to ensure
+ # that any special keytab file needed for HA is used rather than the Ambari-generated value
+ if "oozie.ha.authentication.kerberos.keytab" in oozie_site:
+ oozie_site['oozie.authentication.kerberos.keytab'] = oozie_site['oozie.ha.authentication.kerberos.keytab']
+
+ if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_HOST_KERBEROS, stack_version_formatted):
+ #older versions of oozie have problems when using _HOST in principal
+ oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = oozie_principal_with_host
+ oozie_site['oozie.authentication.kerberos.principal'] = http_principal.replace('_HOST', hostname)
+
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+
+if credential_store_enabled:
+ if 'hadoop.security.credential.provider.path' in config['configurations']['oozie-site']:
+ cs_lib_path = config['configurations']['oozie-site']['credentialStoreClassPath']
+ java_home = config['hostLevelParams']['java_home']
+ alias = 'oozie.service.JPAService.jdbc.password'
+ provider_path = config['configurations']['oozie-site']['hadoop.security.credential.provider.path']
+ oozie_metastore_user_passwd = PasswordString(get_password_from_credential_store(alias, provider_path, cs_lib_path, java_home, jdk_location))
+ else:
+ raise Exception("hadoop.security.credential.provider.path property should be set")
+else:
+ oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
+oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
+if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template or 'oozie.https.port' in config['configurations']['oozie-site'] or 'oozie.https.keystore.file' in config['configurations']['oozie-site'] or 'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
+ oozie_secure = '-secure'
+else:
+ oozie_secure = ''
+
+https_port = None
+# try to get https port form oozie-env content
+for line in oozie_env_sh_template.splitlines():
+ result = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
+ if result is not None:
+ https_port = result.group(1)
+# or from oozie-site.xml
+if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
+ https_port = config['configurations']['oozie-site']['oozie.https.port']
+
+oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
+
+service_check_job_name = default("/configurations/oozie-env/service_check_job_name", "no-op")
+
+# construct proper url for https
+if https_port is not None:
+ parsed_url = urlparse(oozie_base_url)
+ oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
+ if parsed_url.port is None:
+ oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+ else:
+ oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
+
+oozie_setup_sh_current = oozie_setup_sh
+
+hdfs_site = config['configurations']['hdfs-site']
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+
+if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_SETUP_SHARED_LIB, stack_version_formatted):
+ put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
+ # for older
+else:
+ put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
+
+default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
+ "com.mysql.jdbc.Driver":"mysql-connector-java.jar",
+ "org.postgresql.Driver":"postgresql-jdbc.jar",
+ "oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
+ "sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+# NOT SURE THAT IT'S A GOOD IDEA TO USE PATH TO CLASS IN DRIVER, MAYBE IT WILL BE BETTER TO USE DB TYPE.
+# BECAUSE PATH TO CLASSES COULD BE CHANGED
+sqla_db_used = False
+previous_jdbc_jar_name = None
+if jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+ jdbc_driver_jar = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+ previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+elif jdbc_driver_name == "com.mysql.jdbc.Driver":
+ jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+ previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+elif jdbc_driver_name == "org.postgresql.Driver":
+ jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar") #oozie using it's own postgres jdbc
+ previous_jdbc_jar_name = None
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+ jdbc_driver_jar = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+ previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+elif jdbc_driver_name == "sap.jdbc4.sqlanywhere.IDriver":
+ jdbc_driver_jar = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+ previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+ sqla_db_used = True
+else:
+ jdbc_driver_jar = ""
+ jdbc_symlink_name = ""
+ previous_jdbc_jar_name = None
+
+default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+driver_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+if jdbc_driver_name == "org.postgresql.Driver":
+ target = jdbc_driver_jar
+ previous_jdbc_jar = None
+else:
+ target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
+ previous_jdbc_jar = format("{oozie_libext_dir}/{previous_jdbc_jar_name}")
+
+#constants for type2 jdbc
+jdbc_libs_dir = format("{oozie_libext_dir}/native/lib64")
+lib_dir_available = os.path.exists(jdbc_libs_dir)
+
+if sqla_db_used:
+ jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
+ libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
+ downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+
+hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = not len(falcon_host) == 0
+
+oozie_server_hostnames = default("/clusterHostInfo/oozie_server", [])
+oozie_server_hostnames = sorted(oozie_server_hostnames)
+
+oozie_log_maxhistory = default('configurations/oozie-log4j/oozie_log_maxhistory',720)
+
+#oozie-log4j.properties
+if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
+ log4j_props = config['configurations']['oozie-log4j']['content']
+else:
+ log4j_props = None
+
+oozie_hdfs_user_mode = 0775
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks needed by Hive on Oozie
+hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
+
+if has_atlas_in_cluster():
+ atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+#endregion
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+ HdfsResource,
+ user=hdfs_user,
+ hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+ security_enabled = security_enabled,
+ keytab = hdfs_user_keytab,
+ kinit_path_local = kinit_path_local,
+ hadoop_bin_dir = hadoop_bin_dir,
+ hadoop_conf_dir = hadoop_conf_dir,
+ principal_name = hdfs_principal_name,
+ hdfs_site = hdfs_site,
+ default_fs = default_fs,
+ immutable_paths = get_not_managed_resources(),
+ dfs_type = dfs_type
+)
+
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+
+# The logic for LZO also exists in HDFS' params.py
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+all_lzo_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native"]
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_windows.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_windows.py
new file mode 100644
index 0000000..1f939d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params_windows.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+import os
+from status_params import *
+
+config = Script.get_config()
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+oozie_root = os.environ['OOZIE_ROOT']
+oozie_home = os.environ['OOZIE_HOME']
+oozie_conf_dir = os.path.join(oozie_home,'conf')
+oozie_user = hadoop_user
+oozie_tmp_dir = "c:\\hadoop\\temp\\oozie"
+
+oozie_env_cmd_template = config['configurations']['oozie-env']['content']
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/service_check.py
new file mode 100644
index 0000000..03030ca
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import glob
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile
+from resource_management.core.system import System
+from resource_management.libraries.functions import format
+from resource_management.libraries.script import Script
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.core.logger import Logger
+
+class OozieServiceCheck(Script):
+ pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServiceCheckDefault(OozieServiceCheck):
+
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+
+ # on HDP1 this file is different
+ prepare_hdfs_file_name = 'prepareOozieHdfsDirectories.sh'
+ smoke_test_file_name = 'oozieSmoke2.sh'
+
+ OozieServiceCheckDefault.oozie_smoke_shell_file(smoke_test_file_name, prepare_hdfs_file_name)
+
+ @staticmethod
+ def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
+ import params
+
+ File(format("{tmp_dir}/{file_name}"),
+ content=StaticFile(file_name),
+ mode=0755
+ )
+ File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
+ content=StaticFile(prepare_hdfs_file_name),
+ mode=0755
+ )
+
+ os_family = System.get_instance().os_family
+ oozie_examples_dir = glob.glob(params.oozie_examples_regex)[0]
+
+ Execute((format("{tmp_dir}/{prepare_hdfs_file_name}"), params.conf_dir, oozie_examples_dir, params.hadoop_conf_dir, params.yarn_resourcemanager_address, params.fs_root, params.service_check_queue_name, params.service_check_job_name),
+ tries=3,
+ try_sleep=5,
+ logoutput=True
+ )
+
+ params.HdfsResource(format("/user/{smokeuser}"),
+ type="directory",
+ action="create_on_execute",
+ owner=params.smokeuser,
+ mode=params.smoke_hdfs_user_mode,
+ )
+
+ examples_dir = format('/user/{smokeuser}/examples')
+ params.HdfsResource(examples_dir,
+ action = "delete_on_execute",
+ type = "directory"
+ )
+ params.HdfsResource(examples_dir,
+ action = "create_on_execute",
+ type = "directory",
+ source = format("{oozie_examples_dir}/examples"),
+ owner = params.smokeuser,
+ group = params.user_group
+ )
+
+ input_data_dir = format('/user/{smokeuser}/input-data')
+ params.HdfsResource(input_data_dir,
+ action = "delete_on_execute",
+ type = "directory"
+ )
+ params.HdfsResource(input_data_dir,
+ action = "create_on_execute",
+ type = "directory",
+ source = format("{oozie_examples_dir}/examples/input-data"),
+ owner = params.smokeuser,
+ group = params.user_group
+ )
+ params.HdfsResource(None, action="execute")
+
+ if params.security_enabled:
+ sh_cmd = format(
+ "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
+ else:
+ sh_cmd = format(
+ "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled}")
+
+ Execute(sh_cmd,
+ path=params.execute_path,
+ tries=3,
+ try_sleep=5,
+ logoutput=True
+ )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServiceCheckWindows(OozieServiceCheck):
+
+ def service_check(self, env):
+ import params
+
+ env.set_params(params)
+ smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+ service = "OOZIE"
+ Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+ OozieServiceCheck().execute()
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/status_params.py
new file mode 100644
index 0000000..ce990cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/status_params.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+ 'OOZIE_SERVER' : 'oozie-server',
+ 'OOZIE_CLIENT' : 'oozie-client',
+ 'OOZIE_SERVICE_CHECK' : 'oozie-client',
+ 'ru_execute_tasks' : 'oozie-server'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT")
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+ # windows service mapping
+ oozie_server_win_service_name = "oozieservice"
+else:
+ oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
+ pid_file = format("{oozie_pid_dir}/oozie.pid")
+
+ security_enabled = config['configurations']['cluster-env']['security_enabled']
+ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+ conf_dir = "/etc/oozie/conf"
+ if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+ conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+ tmp_dir = Script.get_tmp_dir()
+ oozie_user = config['configurations']['oozie-env']['oozie_user']
+ hostname = config["hostname"]
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/adminusers.txt.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/adminusers.txt.j2
new file mode 100644
index 0000000..2a0f7b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/adminusers.txt.j2
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Users should be set using following rules:
+#
+# One user name per line
+# Empty lines and lines starting with '#' are ignored
+
+{% if oozie_admin_users %}
+{% for oozie_admin_user in oozie_admin_users.split(',') %}
+{{oozie_admin_user|trim}}
+{% endfor %}
+{% endif %}
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie-log4j.properties.j2
new file mode 100644
index 0000000..e39428f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,93 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd}
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie.conf.j2
new file mode 100644
index 0000000..1f99e49
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/oozie.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{oozie_user}} - nofile {{oozie_user_nofile_limit}}
+{{oozie_user}} - nproc {{oozie_user_nproc_limit}}
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/zkmigrator_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/zkmigrator_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/zkmigrator_jaas.conf.j2
new file mode 100644
index 0000000..fbc0ce5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/templates/zkmigrator_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=true
+ storeKey=true
+ useTicketCache=false
+ keyTab="{{oozie_keytab}}"
+ principal="{{oozie_principal_with_host}}";
+};
\ No newline at end of file
[47/50] [abbrv] ambari git commit: AMBARI-21829. Sqoop: hive import
fails with exception 'java.io.IOException:java.lang.RuntimeException:
native-lzo library not available' for LZO compression codec (aonishuk)
Posted by ja...@apache.org.
AMBARI-21829. Sqoop: hive import fails with exception 'java.io.IOException:java.lang.RuntimeException: native-lzo library not available' for LZO compression codec (aonishuk)
Change-Id: I52a315c56a61422e4c67f3bbf57b657e3b8a11ef
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e07e92d8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e07e92d8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e07e92d8
Branch: refs/heads/2.5-maint
Commit: e07e92d8d2027a94cf673836f53143209acd7fcd
Parents: 82a821e
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Aug 28 18:49:46 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Aug 28 18:49:46 2017 +0300
----------------------------------------------------------------------
.../stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml | 3 ++-
.../resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e07e92d8/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index f90a20e..52c2c70 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -37,7 +37,7 @@
<changes>
<definition xsi:type="configure" id="biginsights_4_2_namenode_update_hadoop_env" summary="Update Hadoop env">
<type>hadoop-env</type>
- <replace key="content" find="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native/Linux-amd64-64" replace-with="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/hdp/current/hadoop-client/lib/native" />
+ <replace key="content" find="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native/Linux-amd64-64" replace-with="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:{{hadoop_lib_home}}/native/Linux-{{architecture}}-64" />
<replace key="content" find="export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}" replace-with="export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS} if [ -d "/usr/lib/hadoop-lzo" ]; then export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/hadoop-lzo/lib/* export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop-lzo/lib/native fi"/>
<insert key="content" value="{% if hadoop_custom_extensions_enabled %} export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:{{stack_root}}/current/ext/hadoop/* {% endif %}" insert-type="append" newline-after="true" newline-before="true"/>
</definition>
@@ -108,6 +108,7 @@
<definition xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" summary="Update MapReduce2 configurations">
<type>mapred-site</type>
<set key="mapreduce.application.framework.path" value="/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework"/>
+ <set key="yarn.app.mapreduce.am.env" value="LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-{{architecture}}-64"/>
</definition>
</changes>
</component>
http://git-wip-us.apache.org/repos/asf/ambari/blob/e07e92d8/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index cdc9c40..f767a02 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -125,7 +125,7 @@
<changes>
<definition xsi:type="configure" id="biginsights_4_2_namenode_update_hadoop_env" summary="Update Hadoop env">
<type>hadoop-env</type>
- <replace key="content" find="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native/Linux-amd64-64" replace-with="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/hdp/current/hadoop-client/lib/native" />
+ <replace key="content" find="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native/Linux-amd64-64" replace-with="export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:{{hadoop_lib_home}}/native/Linux-{{architecture}}-64" />
<replace key="content" find="export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}" replace-with="export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS} if [ -d "/usr/lib/hadoop-lzo" ]; then export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/hadoop-lzo/lib/* export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop-lzo/lib/native fi"/>
<insert key="content" value="{% if hadoop_custom_extensions_enabled %} export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:{{stack_root}}/current/ext/hadoop/* {% endif %}" insert-type="append" newline-after="true" newline-before="true"/>
</definition>
@@ -197,6 +197,7 @@
<definition xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" summary="Update MapReduce2 configurations">
<type>mapred-site</type>
<set key="mapreduce.application.framework.path" value="/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework"/>
+ <set key="yarn.app.mapreduce.am.env" value="LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-{{architecture}}-64"/>
<insert key="mapreduce.application.classpath" insert-type="append" value=":/usr/hdp/current/ext/hadoop/*" newline-before="false" newline-after="false"/>
</definition>
</changes>
[07/50] [abbrv] ambari git commit: AMBARI-21690 Updating
get_stack_version pattern match (mugdha)
Posted by ja...@apache.org.
AMBARI-21690 Updating get_stack_version pattern match (mugdha)
(cherry picked from commit ee87b6ab60edc43ef94a404ae2ab72da42f3549f)
Change-Id: I258779ad329a423cf43dbdbf9ff340697b6a10da
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b79e01b8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b79e01b8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b79e01b8
Branch: refs/heads/2.5-maint
Commit: b79e01b807801873968916f03d164bfcc68b7007
Parents: 591b406
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Thu Aug 10 11:57:28 2017 +0530
Committer: Zuul <re...@hortonworks.com>
Committed: Thu Aug 10 22:28:53 2017 -0700
----------------------------------------------------------------------
----------------------------------------------------------------------
[21/50] [abbrv] ambari git commit: AMBARI-21743 Delete
ranger-solr-plugin-enabled property during Cross Stack Upgrade (mugdha)
Posted by ja...@apache.org.
AMBARI-21743 Delete ranger-solr-plugin-enabled property during Cross Stack Upgrade (mugdha)
(cherry picked from commit dae9c5d533b71bdfdd9652a9257fa98657e45cde)
Change-Id: I4394e08942c01942eccaedd487eb8fd7d96ad5dd
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0c976252
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0c976252
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0c976252
Branch: refs/heads/2.5-maint
Commit: 0c97625222dd3bc3e21fcf04cfce3f77e6d48ef5
Parents: 5e1bfb7
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Thu Aug 17 17:06:39 2017 +0530
Committer: Zuul <re...@hortonworks.com>
Committed: Thu Aug 17 14:14:57 2017 -0700
----------------------------------------------------------------------
.../stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml | 4 ++++
.../BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml | 4 ++++
2 files changed, 8 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/0c976252/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index 6ed6a11..f55f9fb 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -156,6 +156,10 @@
<type>ranger-env</type>
<transfer operation="delete" delete-key="bind_anonymous" />
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_solr_plugin_enabled_property">
+ <type>ranger-env</type>
+ <transfer operation="delete" delete-key="ranger-solr-plugin-enabled" if-type="ranger-env" if-key="ranger-solr-plugin-enabled" if-key-state="present" />
+ </definition>
<definition xsi:type="configure" id="admin_log4j_parameterize" summary="Parameterizing Ranger Log4J Properties">
<type>admin-log4j</type>
<set key="ranger_xa_log_maxfilesize" value="256"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/0c976252/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index e5f3690..83968e0 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -294,6 +294,10 @@
<task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
</execute-stage>
+ <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+ <task xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_solr_plugin_enabled_property"/>
+ </execute-stage>
+
<execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
<task xsi:type="configure" id="admin_log4j_parameterize">
<summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
[27/50] [abbrv] ambari git commit: AMBARI-21762 Sometimes alerts
order is not correctly arranged if ordering by Status. (atkach)
Posted by ja...@apache.org.
AMBARI-21762 Sometimes alerts order is not correctly arranged if ordering by Status. (atkach)
(cherry picked from commit ba1fa0bfbcfccc1d182ce9f5d5928757c0fc2ea5)
Change-Id: I0f9a220fde73b5e466366d45fe7b9ccaa4e30502
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f146158e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f146158e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f146158e
Branch: refs/heads/2.5-maint
Commit: f146158e85cc817edd5b1beb1a7a3561e72f98d2
Parents: 5a80b52
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Aug 21 16:11:44 2017 +0300
Committer: atkach@apache.org <je...@hortonworks.com>
Committed: Mon Aug 21 13:21:18 2017 +0000
----------------------------------------------------------------------
ambari-web/app/views/common/sort_view.js | 26 ++++++++++-
.../app/views/main/alert_definitions_view.js | 9 ++--
ambari-web/test/views/common/sort_view_test.js | 49 +++++++++++++++++++-
3 files changed, 78 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f146158e/ambari-web/app/views/common/sort_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/sort_view.js b/ambari-web/app/views/common/sort_view.js
index 0fc1db7..290a12f 100644
--- a/ambari-web/app/views/common/sort_view.js
+++ b/ambari-web/app/views/common/sort_view.js
@@ -30,6 +30,8 @@ var App = require('app');
var wrapperView = Em.View.extend({
tagName: 'tr',
+ name: 'SortWrapperView',
+
classNames: ['sort-wrapper'],
willInsertElement: function () {
@@ -87,9 +89,10 @@ var wrapperView = Em.View.extend({
* @param property {object}
* @param order {Boolean} true - DESC, false - ASC
* @param returnSorted {Boolean}
+ * @param content {Array}
*/
- sort: function (property, order, returnSorted) {
- var content = this.get('content').toArray();
+ sort: function (property, order, returnSorted, content) {
+ content = content || this.get('content').toArray();
var sortFunc = this.getSortFunc(property, order);
var status = order ? 'sorting_desc' : 'sorting_asc';
@@ -122,6 +125,25 @@ var wrapperView = Em.View.extend({
}.observes('controller.contentUpdater'),
/**
+ *
+ * @param {Em.Object[]} content
+ * @returns {Em.Object[]}
+ */
+ getSortedContent: function(content) {
+ if (!this.get('isSorting') && content.get('length')) {
+ var activeSortViews = this.get('childViews').rejectProperty('status', 'sorting');
+ if (activeSortViews[0]) {
+ var status = activeSortViews[0].get('status');
+ this.set('isSorting', true);
+ content = this.sort(activeSortViews[0], status === 'sorting_desc', true, content);
+ this.set('isSorting', false);
+ activeSortViews[0].set('status', status);
+ }
+ }
+ return content;
+ },
+
+ /**
* reset all sorts fields
*/
resetSort: function () {
http://git-wip-us.apache.org/repos/asf/ambari/blob/f146158e/ambari-web/app/views/main/alert_definitions_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/alert_definitions_view.js b/ambari-web/app/views/main/alert_definitions_view.js
index 9fb517f..ec52075 100644
--- a/ambari-web/app/views/main/alert_definitions_view.js
+++ b/ambari-web/app/views/main/alert_definitions_view.js
@@ -28,11 +28,14 @@ App.MainAlertDefinitionsView = App.TableView.extend({
contentObs: function () {
Em.run.once(this, this.contentObsOnce);
- }.observes('controller.content.[]', 'App.router.clusterController.isAlertsLoaded'),
+ }.observes('controller.content.@each.summary', 'App.router.clusterController.isAlertsLoaded'),
contentObsOnce: function() {
var content = this.get('controller.content') && App.get('router.clusterController.isAlertsLoaded') ?
- this.get('controller.content').toArray().sort(App.AlertDefinition.getSortDefinitionsByStatus(true)) : [];
+ this.get('controller.content').toArray() : [];
+ if (this.get('childViews').someProperty('name', 'SortWrapperView')) {
+ content = this.get('childViews').findProperty('name', 'SortWrapperView').getSortedContent(content);
+ }
this.set('content', content);
},
@@ -46,7 +49,7 @@ App.MainAlertDefinitionsView = App.TableView.extend({
if (savedSortConditions.everyProperty('status', 'sorting')) {
savedSortConditions.push({
name: "summary",
- status: "sorting_asc"
+ status: "sorting_desc"
});
App.db.setSortingStatuses(controllerName, savedSortConditions);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f146158e/ambari-web/test/views/common/sort_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/sort_view_test.js b/ambari-web/test/views/common/sort_view_test.js
index a21a352..9d095ec 100644
--- a/ambari-web/test/views/common/sort_view_test.js
+++ b/ambari-web/test/views/common/sort_view_test.js
@@ -182,6 +182,53 @@ describe('#wrapperView', function () {
});
})
- })
+ });
+
+ describe('#getSortedContent', function() {
+ var wrapperView;
+ var content = [
+ Em.Object.create({
+ id: 1
+ }),
+ Em.Object.create({
+ id: 2
+ })
+ ];
+
+ beforeEach(function() {
+ wrapperView = sort.wrapperView.create({
+ childViews: [],
+ isSorting: false
+ });
+ sinon.stub(wrapperView, 'sort', function(arg1, arg2, arg3, arg4) {
+ return arg4.reverse();
+ });
+ });
+ afterEach(function() {
+ wrapperView.sort.restore();
+ });
+
+ it('should return content without sorting', function() {
+ expect(wrapperView.getSortedContent(content)).to.be.eql(content);
+ expect(wrapperView.sort.called).to.be.false;
+ });
+
+ it('should return content with sorting', function() {
+ wrapperView.set('childViews', [
+ Em.Object.create({
+ status: 'sorting_desc'
+ })
+ ]);
+ expect(wrapperView.getSortedContent(content)).to.be.eql(content.reverse());
+ expect(wrapperView.sort.calledWith(
+ Em.Object.create({
+ status: 'sorting_desc'
+ }),
+ true,
+ true,
+ content
+ )).to.be.true;
+ });
+ });
});
\ No newline at end of file
[12/50] [abbrv] ambari git commit: Revert "AMBARI-21045.
AMBARI-21045. Enable Storm's AutoTGT configs in secure mode (Sriharsha
Chintalapani via smohanty)"
Posted by ja...@apache.org.
Revert "AMBARI-21045. AMBARI-21045. Enable Storm's AutoTGT configs in secure mode (Sriharsha Chintalapani via smohanty)"
This reverts commit 3d07ec5a091de3735e63c5500c380612e6533baa.
(cherry picked from commit 96e5ea566037e7874e815000966495145974f69c)
Change-Id: I0420808b1238dcf0504a36fe9b961241c52794d6
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5ec0382b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5ec0382b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5ec0382b
Branch: refs/heads/2.5-maint
Commit: 5ec0382b222e94235381f0e99ea57764d62e825a
Parents: 55fbea5
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Aug 15 16:58:05 2017 -0700
Committer: smohanty@hortonworks.com <je...@hortonworks.com>
Committed: Wed Aug 16 00:11:11 2017 +0000
----------------------------------------------------------------------
.../server/upgrade/UpgradeCatalog251.java | 36 --------
.../STORM/1.1.0/configuration/storm-env.xml | 54 ------------
.../STORM/1.1.0/configuration/storm-site.xml | 48 +++++++++++
.../stacks/HDP/2.3/upgrades/config-upgrade.xml | 7 --
.../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml | 4 -
.../stacks/HDP/2.3/upgrades/upgrade-2.6.xml | 1 -
.../stacks/HDP/2.4/upgrades/config-upgrade.xml | 7 --
.../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml | 4 -
.../stacks/HDP/2.4/upgrades/upgrade-2.6.xml | 2 -
.../stacks/HDP/2.5/upgrades/config-upgrade.xml | 7 --
.../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml | 4 -
.../stacks/HDP/2.5/upgrades/upgrade-2.6.xml | 1 -
.../stacks/HDP/2.6/services/stack_advisor.py | 87 +-------------------
.../stacks/HDP/2.6/upgrades/config-upgrade.xml | 6 --
.../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml | 4 -
.../stacks/HDP/2.6/upgrades/upgrade-2.6.xml | 1 -
.../src/main/resources/stacks/stack_advisor.py | 18 ----
17 files changed, 51 insertions(+), 240 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index 1a854f8..afda1f6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -170,40 +170,4 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
}
}
}
-
- /**
- * Make sure storm-env changes are applied to anyone upgrading to HDP-2.6.1 Storm
- * If the base version was before Ambari 2.5.0, this method should wind up doing nothing.
- * @throws AmbariException
- */
- protected void updateSTORMConfigs() throws AmbariException {
- AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
- Clusters clusters = ambariManagementController.getClusters();
- if (clusters != null) {
- Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
- if (clusterMap != null && !clusterMap.isEmpty()) {
- for (final Cluster cluster : clusterMap.values()) {
- Set<String> installedServices = cluster.getServices().keySet();
-
- if (installedServices.contains("STORM") && cluster.getSecurityType() == SecurityType.KERBEROS) {
- Config stormEnv = cluster.getDesiredConfigByType(STORM_ENV_CONFIG);
- String content = stormEnv.getProperties().get("content");
- if (content != null && !content.contains("STORM_AUTOCREDS_LIB_DIR")) {
- Map<String, String> newProperties = new HashMap<>();
- String stormEnvConfigs = "\n #set storm-auto creds \n" +
- "# check if storm_jaas.conf in config , only enable storm_auto_creds in secure mode.\n " +
- "STORM_JAAS_CONF=$STORM_HOME/conf/storm_jaas.conf \n" +
- "STORM_AUTOCREDS_LIB_DIR=$STORM_HOME/external/storm-autocreds \n" +
- "if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then \n" +
- " export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR \n" +
- "fi\n";
- content += stormEnvConfigs;
- newProperties.put("content", content);
- updateConfigurationPropertiesForCluster(cluster, "storm-env", newProperties, true, false);
- }
- }
- }
- }
- }
- }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
deleted file mode 100644
index 2ce560d..0000000
--- a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
- <!-- storm-env.sh -->
- <property>
- <name>content</name>
- <display-name>storm-env template</display-name>
- <description>This is the jinja template for storm-env.sh file</description>
- <value>
-#!/bin/bash
-
-# Set Storm specific environment variables here.
-
-# The java implementation to use.
-export JAVA_HOME={{java64_home}}
-
-export STORM_CONF_DIR={{conf_dir}}
-export STORM_HOME={{storm_component_home_dir}}
-export STORM_JAR_JVM_OPTS={{jar_jvm_opts}}
-
-#set storm-auto creds
-# check if storm_jaas.conf in config , only enable storm_auto_creds in secure mode.
-STORM_JAAS_CONF=$STORM_HOME/conf/storm_jaas.conf
-STORM_AUTOCREDS_LIB_DIR=$STORM_HOME/external/storm-autocreds
-
-if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then
- export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR
-fi
- </value>
- <value-attributes>
- <type>content</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
new file mode 100644
index 0000000..b2e9acb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+ <property>
+ <name>nimbus.impersonation.acl</name>
+ <description>
+ The ImpersonationAuthorizer uses nimbus.impersonation.acl as the acl to authorize users. Following is a sample nimbus config for supporting impersonation:
+ nimbus.impersonation.acl:
+ impersonating_user1:
+ hosts:
+ [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
+ groups:
+ [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
+ impersonating_user2:
+ hosts:
+ [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
+ groups:
+ [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
+ </description>
+ <!-- The depends-on section is new in this version, required so Stack Advisor can include streamline-env and streamline-common configs. -->
+ <depends-on>
+ <property>
+ <type>streamline-env</type>
+ <name>streamline_principal_name</name>
+ </property>
+ </depends-on>
+ <on-ambari-upgrade add="false"/>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 0c4106e..d8e2d7d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -664,13 +664,6 @@
replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
</definition>
- <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
- <type>storm-site</type>
- <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- </definition>
-
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index c45c7c5..ca3c193 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -638,10 +638,6 @@
<task xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0"/>
</execute-stage>
- <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
- <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
- </execute-stage>
-
<execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm">
<!-- Remove Atlas configs that were incorrectly added to storm-site instead of Atlas' application.properties. -->
<task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index 69be4dc..cf2c257 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -1130,7 +1130,6 @@
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
<task xsi:type="configure" id="storm_worker_log4j_directory" />
- <task xsi:type="configure" id="storm_nimbus_autocred_config" />
</pre-upgrade>
<pre-downgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index 3e62fb1..83a0a1e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -483,13 +483,6 @@
<replace key="content" find="${sys:storm.log.dir}/${sys:logfile.name}"
replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
</definition>
- <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
- <type>storm-site</type>
- <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- </definition>
-
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index b6f48fc..6e899ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -563,10 +563,6 @@
<task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/>
</execute-stage>
- <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
- <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
- </execute-stage>
-
<execute-stage service="STORM" component="NIMBUS" title="Apply security changes for Storm">
<!-- Add nimbus.impersonation acls . -->
<task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index f1d34e7..4e14e83 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -1092,8 +1092,6 @@
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
<task xsi:type="configure" id="storm_worker_log4j_directory" />
- <task xsi:type="configure" id="storm_nimbus_autocred_config" />
-
</pre-upgrade>
<pre-downgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 8539006..f227bac 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -63,13 +63,6 @@
<regex-replace key="content" find="A1" immediateFlush="false"
 fileName="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
 filePattern="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>\$\{pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="(?:[0-9]+) MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="([0-9]+)"
replace-with="A1" immediateFlush="false"
 fileName="${sys:storm.log.dir}/${sys:logfile.name}"
 filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>${pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>
</definition>
- <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
- <type>storm-site</type>
- <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- </definition>
-
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index ef4c6f4..ce10e8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -416,10 +416,6 @@
<task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
</execute-stage>
- <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
- <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
- </execute-stage>
-
<!--RANGER-->
<execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
<task xsi:type="configure" id="admin_log4j_parameterize">
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index d9675f1..840b17d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -1030,7 +1030,6 @@
<pre-upgrade>
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
- <task xsi:type="configure" id="storm_nimbus_autocred_config" />
</pre-upgrade>
<pre-downgrade/>
<upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 974b10c..cc5fa92 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -41,92 +41,11 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
"HIVE": self.recommendHIVEConfigurations,
"HBASE": self.recommendHBASEConfigurations,
"YARN": self.recommendYARNConfigurations,
- "KAFKA": self.recommendKAFKAConfigurations,
- "BEACON": self.recommendBEACONConfigurations,
- "STORM": self.recommendSTORMConfigurations
+ "KAFKA": self.recommendKAFKAConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
- def recommendSTORMConfigurations(self, configurations, clusterData, services, hosts):
- """
- In HDF-2.6.1 we introduced a new way of doing Auto Credentials with services such as
- HDFS, HIVE, HBASE. This method will update the required configs for autocreds if the users installs
- STREAMLINE service.
- """
- super(HDP26StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
- storm_site = self.getServicesSiteProperties(services, "storm-site")
- storm_env = self.getServicesSiteProperties(services, "storm-env")
- putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
- putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
- security_enabled = self.isSecurityEnabled(services)
- servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-
- if storm_env and storm_site and security_enabled and 'STREAMLINE' in servicesList:
- storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"] if "nimbus.impersonation.acl" in storm_site else None
- streamline_env = self.getServicesSiteProperties(services, "streamline-env")
- _streamline_principal_name = streamline_env['streamline_principal_name'] if 'streamline_principal_name' in streamline_env else None
- if _streamline_principal_name is not None and storm_nimbus_impersonation_acl is not None:
- streamline_bare_principal = get_bare_principal(_streamline_principal_name)
- storm_nimbus_impersonation_acl.replace('{{streamline_bare_principal}}', streamline_bare_principal)
- putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
-
- storm_nimbus_autocred_plugin_classes = storm_site["nimbus.autocredential.plugins.classes"] if "nimbus.autocredential.plugins.classes" in storm_site else None
- if storm_nimbus_autocred_plugin_classes is not None:
- new_storm_nimbus_autocred_plugin_classes = ['org.apache.storm.hdfs.security.AutoHDFS',
- 'org.apache.storm.hbase.security.AutoHBase',
- 'org.apache.storm.hive.security.AutoHive']
- new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_autocred_plugin_classes,
- new_storm_nimbus_autocred_plugin_classes)
-
- putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
- else:
- putStormSiteProperty("nimbus.autocredential.plugins.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
-
-
- storm_nimbus_credential_renewer_classes = storm_site["nimbus.credential.renewers.classes"] if "nimbus.credential.renewers.classes" in storm_site else None
- if storm_nimbus_credential_renewer_classes is not None:
- new_storm_nimbus_credential_renewer_classes_array = ['org.apache.storm.hdfs.security.AutoHDFS',
- 'org.apache.storm.hbase.security.AutoHBase',
- 'org.apache.storm.hive.security.AutoHive']
- new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_credential_renewer_classes,
- new_storm_nimbus_credential_renewer_classes_array)
- putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
- else:
- putStormSiteProperty("nimbus.credential.renewers.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
- putStormSiteProperty("nimbus.credential.renewers.freq.secs", "82800")
- pass
-
- def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
- beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
- putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
-
- # database URL and driver class recommendations
- if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
- putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
- if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
- beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
- beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
- protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
- oldSchemaName = getOldValue(self, services, "beacon-env", "beacon_store_db_name")
- oldDBType = getOldValue(self, services, "beacon-env", "beacon_database")
- # under these if constructions we are checking if beacon server hostname available,
- # if it's default db connection url with "localhost" or if schema name was changed or if db type was changed (only for db type change from default mysql to existing mysql)
- # or if protocol according to current db type differs with protocol in db connection url(other db types changes)
- if beaconServerHost is not None:
- if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
- dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
- putbeaconEnvProperty('beacon_store_url', dbConnection)
-
- def getDBConnectionStringBeacon(self, databaseType):
- driverDict = {
- 'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
- 'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
- 'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
- 'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
- }
- return driverDict.get(databaseType.upper())
-
def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
@@ -482,9 +401,9 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
propertyValue = "https://"+webapp_address+"/ws/v1/applicationhistory"
Logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
if services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"] != propertyValue:
- validationItems = [
+ validationItems.append(
{"config-name": "yarn.log.server.web-service.url",
- "item": self.getWarnItem("Value should be %s" % propertyValue)}]
+ "item": self.getWarnItem("Value should be %s" % propertyValue)})
return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def validateDruidHistoricalConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 3881137..c2c1532 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -111,12 +111,6 @@
<set key="ranger.plugin.storm.ambari.cluster.name" value="{{cluster_name}}"
if-type="ranger-storm-plugin-properties" if-key="ranger-storm-plugin-enabled" if-key-state="present"/>
</definition>
- <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
- <type>storm-site</type>
- <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
- </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index 6d8821b..df609cd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -328,10 +328,6 @@
<task xsi:type="configure" id="hdp_2_6_maint_ranger_storm_plugin_cluster_name"/>
</execute-stage>
- <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
- <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
- </execute-stage>
-
<!-- YARN -->
<execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Ranger Yarn plugin">
<task xsi:type="configure" id="hdp_2_6_maint_ranger_yarn_plugin_cluster_name"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 07f0960..b376fa7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -965,7 +965,6 @@
<component name="NIMBUS">
<pre-upgrade>
<task xsi:type="configure" id="hdp_2_6_maint_ranger_storm_plugin_cluster_name"/>
- <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
</pre-upgrade>
<pre-downgrade/> <!-- no-op to prevent config changes on downgrade -->
<upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec0382b/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 8ccbaba..ccbb676 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -1375,21 +1375,3 @@ class DefaultStackAdvisor(StackAdvisor):
if recommendation:
put_f(name, ",".join(recommendation))
-
- @classmethod
- def appendToYamlString(cls, yaml_string, list_classes):
- updated_yaml_string = ""
- try:
- strip_yaml_str = re.sub('[\[\]\']', ' ', yaml_string)
- klass_array = [x.strip() for x in strip_yaml_str.split(',')]
- if yaml_string:
- for klass in list_classes:
- klass = klass.strip()
- klass_array.append(klass)
- klass_set = set(klass_array)
- klass_list = [("'" + e + "'") for e in klass_set]
- updated_yaml_string = "[" + ",".join(klass_list) + "]"
- except Exception:
- klass_list = [("'" + e + "'") for e in list_classes]
- updated_yaml_string = "[" + ",".join(klass_list) + "]"
- return updated_yaml_string
[33/50] [abbrv] ambari git commit: AMBARI-21771 : Maven cleanup of
ambari-metrics and ambari-logsearch modules. (avijayan)
Posted by ja...@apache.org.
AMBARI-21771 : Maven cleanup of ambari-metrics and ambari-logsearch modules. (avijayan)
(cherry picked from commit cf5c2f56e21d41f0f71cab5b9c3fb34fd56c5c4c)
Change-Id: I8cd293ecb882d688a5e6cd079799708a80777a87
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2054332b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2054332b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2054332b
Branch: refs/heads/2.5-maint
Commit: 2054332bddc69862b48943d2209ab4f2e9eb3db4
Parents: 688258b
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Mon Aug 21 14:03:57 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Mon Aug 21 17:09:40 2017 -0700
----------------------------------------------------------------------
ambari-logsearch/ambari-logsearch-assembly/pom.xml | 3 ++-
ambari-metrics/ambari-metrics-assembly/pom.xml | 2 ++
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/2054332b/ambari-logsearch/ambari-logsearch-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-assembly/pom.xml b/ambari-logsearch/ambari-logsearch-assembly/pom.xml
index 48d16b5..8878997 100644
--- a/ambari-logsearch/ambari-logsearch-assembly/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-assembly/pom.xml
@@ -192,6 +192,7 @@
<exclude>classes/log4j.xml</exclude>
<exclude>classes/logfeeder.properties</exclude>
<exclude>classes/grok-patterns</exclude>
+ <exclude>libs/checkstyle*.jar</exclude>
</excludes>
</source>
</sources>
@@ -453,7 +454,7 @@
<group>root</group>
</mapper>
<excludes>
- classes/log4j.xml,classes/logfeeder.properties,classes/grok-patterns
+ classes/log4j.xml,classes/logfeeder.properties,classes/grok-patterns,libs/checkstyle*.jar
</excludes>
</data>
<data>
http://git-wip-us.apache.org/repos/asf/ambari/blob/2054332b/ambari-metrics/ambari-metrics-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/pom.xml b/ambari-metrics/ambari-metrics-assembly/pom.xml
index 4798b3f..b6b9ad7 100644
--- a/ambari-metrics/ambari-metrics-assembly/pom.xml
+++ b/ambari-metrics/ambari-metrics-assembly/pom.xml
@@ -243,6 +243,7 @@
<location>${collector.dir}/target/lib</location>
<excludes>
<exclude>*tests.jar</exclude>
+ <exclude>findbugs*.jar</exclude>
</excludes>
</source>
<source>
@@ -262,6 +263,7 @@
<exclude>bin/**</exclude>
<exclude>bin/*</exclude>
<exclude>lib/*tests.jar</exclude>
+ <exclude>lib/findbugs*.jar</exclude>
</excludes>
</source>
</sources>
[26/50] [abbrv] ambari git commit: AMBARI-21749. Remove DB
consistency check for topology tables (magyari_sandor)
Posted by ja...@apache.org.
AMBARI-21749. Remove DB consistency check for topology tables (magyari_sandor)
(cherry picked from commit 5c09d0760eb62ff18f120b0688ff4eb7e9b63227)
Change-Id: If7814db6100e121aaa88b3ffb69a9a4152a59ebe
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5a80b527
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5a80b527
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5a80b527
Branch: refs/heads/2.5-maint
Commit: 5a80b5274a7aad0312ac174fa3e6f247687c35b1
Parents: 2030db8
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Fri Aug 18 13:13:29 2017 +0200
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Sat Aug 19 09:28:13 2017 -0700
----------------------------------------------------------------------
.../checks/DatabaseConsistencyCheckHelper.java | 62 -------------------
.../server/topology/PersistedStateImpl.java | 23 +++----
.../DatabaseConsistencyCheckHelperTest.java | 63 --------------------
3 files changed, 13 insertions(+), 135 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5a80b527/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index 533fad7..d59e676 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -187,7 +187,6 @@ public class DatabaseConsistencyCheckHelper {
checkForHostsWithoutState();
checkHostComponentStatesCountEqualsHostComponentsDesiredStates();
checkServiceConfigs();
- checkTopologyTables();
checkForLargeTables();
checkConfigGroupHostMapping(true);
LOG.info("******************************* Check database completed *******************************");
@@ -493,67 +492,6 @@ public class DatabaseConsistencyCheckHelper {
}
}
-
- /**
- * This method checks that for each row in topology_request there is at least one row in topology_logical_request,
- * topology_host_request, topology_host_task, topology_logical_task.
- * */
- static void checkTopologyTables() {
- LOG.info("Checking Topology tables");
-
- String SELECT_REQUEST_COUNT_QUERY = "select count(tpr.id) from topology_request tpr";
-
- String SELECT_JOINED_COUNT_QUERY = "select count(DISTINCT tpr.id) from topology_request tpr join " +
- "topology_logical_request tlr on tpr.id = tlr.request_id";
-
- String SELECT_HOST_REQUEST_COUNT_QUERY = "select count(thr.id) from topology_host_request thr";
-
- String SELECT_HOST_JOINED_COUNT_QUERY = "select count(DISTINCT thr.id) from topology_host_request thr join " +
- "topology_host_task tht on thr.id = tht.host_request_id join topology_logical_task " +
- "tlt on tht.id = tlt.host_task_id";
-
- Statement statement = null;
-
- if (connection == null) {
- if (dbAccessor == null) {
- dbAccessor = injector.getInstance(DBAccessor.class);
- }
- connection = dbAccessor.getConnection();
- }
-
- try {
- statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
-
- int topologyRequestCount = runQuery(statement, SELECT_REQUEST_COUNT_QUERY);
- int topologyRequestTablesJoinedCount = runQuery(statement, SELECT_JOINED_COUNT_QUERY);
-
- if (topologyRequestCount != topologyRequestTablesJoinedCount) {
- error("Your topology request hierarchy is not complete for each row in topology_request should exist " +
- "at least one row in topology_logical_request");
- }
-
- int topologyHostRequestCount = runQuery(statement, SELECT_HOST_REQUEST_COUNT_QUERY);
- int topologyHostRequestTablesJoinedCount = runQuery(statement, SELECT_HOST_JOINED_COUNT_QUERY);
-
- if (topologyHostRequestCount != topologyHostRequestTablesJoinedCount) {
- error("Your topology request hierarchy is not complete for each row in topology_host_request should exist " +
- "at least one row in topology_host_task, topology_logical_task.");
- }
-
- } catch (SQLException e) {
- LOG.error("Exception occurred during topology request tables check: ", e);
- } finally {
- if (statement != null) {
- try {
- statement.close();
- } catch (SQLException e) {
- LOG.error("Exception occurred during statement closing procedure: ", e);
- }
- }
- }
-
- }
-
private static int runQuery(Statement statement, String query) {
ResultSet rs = null;
int result = 0;
http://git-wip-us.apache.org/repos/asf/ambari/blob/5a80b527/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
index 1def4df..ebc5fbc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
@@ -223,18 +223,21 @@ public class PersistedStateImpl implements PersistedState {
}
TopologyLogicalRequestEntity logicalRequestEntity = entity.getTopologyLogicalRequestEntity();
- Long logicalId = logicalRequestEntity.getId();
+ if (logicalRequestEntity != null) {
+ try {
+ Long logicalId = logicalRequestEntity.getId();
- try {
- //todo: fix initialization of ActionManager.requestCounter to account for logical requests
- //todo: until this is fixed, increment the counter for every recovered logical request
- //todo: this will cause gaps in the request id's after recovery
- ambariContext.getNextRequestId();
- allRequests.get(clusterTopology).add(logicalRequestFactory.createRequest(
- logicalId, replayedRequest, clusterTopology, logicalRequestEntity));
- } catch (AmbariException e) {
- throw new RuntimeException("Failed to construct logical request during replay: " + e, e);
+ //todo: fix initialization of ActionManager.requestCounter to account for logical requests
+ //todo: until this is fixed, increment the counter for every recovered logical request
+ //todo: this will cause gaps in the request id's after recovery
+ ambariContext.getNextRequestId();
+ allRequests.get(clusterTopology).add(logicalRequestFactory.createRequest(
+ logicalId, replayedRequest, clusterTopology, logicalRequestEntity));
+ } catch (AmbariException e) {
+ throw new RuntimeException("Failed to construct logical request during replay: " + e, e);
+ }
}
+
}
return allRequests;
http://git-wip-us.apache.org/repos/asf/ambari/blob/5a80b527/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
index 9e85fa2..1e4cf58 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelperTest.java
@@ -189,69 +189,6 @@ public class DatabaseConsistencyCheckHelperTest {
}
@Test
- public void testCheckTopologyTablesAreConsistent() throws Exception {
- testCheckTopologyTablesConsistent(2);
- Assert.assertFalse(DatabaseConsistencyCheckHelper.getLastCheckResult().isError());
- }
-
- @Test
- public void testCheckTopologyTablesAreNotConsistent() throws Exception {
- testCheckTopologyTablesConsistent(1);
- Assert.assertTrue(DatabaseConsistencyCheckHelper.getLastCheckResult().isError());
- }
-
- private void testCheckTopologyTablesConsistent(int resultCount) throws Exception {
- EasyMockSupport easyMockSupport = new EasyMockSupport();
-
- final DBAccessor mockDBDbAccessor = easyMockSupport.createNiceMock(DBAccessor.class);
- final Connection mockConnection = easyMockSupport.createNiceMock(Connection.class);
- final ResultSet mockCountResultSet = easyMockSupport.createNiceMock(ResultSet.class);
- final ResultSet mockJoinResultSet = easyMockSupport.createNiceMock(ResultSet.class);
- final Statement mockStatement = easyMockSupport.createNiceMock(Statement.class);
-
- final StackManagerFactory mockStackManagerFactory = easyMockSupport.createNiceMock(StackManagerFactory.class);
- final EntityManager mockEntityManager = easyMockSupport.createNiceMock(EntityManager.class);
- final Clusters mockClusters = easyMockSupport.createNiceMock(Clusters.class);
- final OsFamily mockOSFamily = easyMockSupport.createNiceMock(OsFamily.class);
- final Injector mockInjector = Guice.createInjector(new AbstractModule() {
- @Override
- protected void configure() {
-
- bind(StackManagerFactory.class).toInstance(mockStackManagerFactory);
- bind(EntityManager.class).toInstance(mockEntityManager);
- bind(DBAccessor.class).toInstance(mockDBDbAccessor);
- bind(Clusters.class).toInstance(mockClusters);
- bind(OsFamily.class).toInstance(mockOSFamily);
- }
- });
-
- expect(mockConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).andReturn(mockStatement);
- expect(mockCountResultSet.next()).andReturn(true).once();
- expect(mockCountResultSet.getInt(1)).andReturn(2);
- expect(mockJoinResultSet.next()).andReturn(true).once();
- expect(mockJoinResultSet.getInt(1)).andReturn(resultCount);
- expect(mockStatement.executeQuery("select count(tpr.id) from topology_request tpr")).andReturn(mockCountResultSet);
- expect(mockStatement.executeQuery("select count(DISTINCT tpr.id) from topology_request tpr join " +
- "topology_logical_request tlr on tpr.id = tlr.request_id")).andReturn(mockJoinResultSet);
-
- expect(mockStatement.executeQuery("select count(thr.id) from topology_host_request thr")).andReturn(mockCountResultSet);
- expect(mockStatement.executeQuery("select count(DISTINCT thr.id) from topology_host_request thr join " +
- "topology_host_task tht on thr.id = tht.host_request_id join topology_logical_task " +
- "tlt on tht.id = tlt.host_task_id")).andReturn(mockJoinResultSet);
-
- DatabaseConsistencyCheckHelper.setInjector(mockInjector);
- DatabaseConsistencyCheckHelper.setConnection(mockConnection);
-
- easyMockSupport.replayAll();
-
-
- DatabaseConsistencyCheckHelper.checkTopologyTables();
-
- easyMockSupport.verifyAll();
-
- }
-
- @Test
public void testCheckHostComponentStatesCountEqualsHostComponentsDesiredStates() throws Exception {
EasyMockSupport easyMockSupport = new EasyMockSupport();
[39/50] [abbrv] ambari git commit: AMBARI-21794. Post Ambari upgrade
on a migration cluster Oozie failed to restart when LZO compression is
enabled. (swagle)
Posted by ja...@apache.org.
AMBARI-21794. Post Ambari upgrade on a migration cluster Oozie failed to restart when LZO compression is enabled. (swagle)
Change-Id: I8097eb4bb752040fd11ff3853004551cde81182c
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/df448220
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/df448220
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/df448220
Branch: refs/heads/2.5-maint
Commit: df44822039a0e59b67004e4e07f46f37b356da55
Parents: 4e453f6
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Aug 23 13:38:14 2017 -0700
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Aug 23 13:43:20 2017 -0700
----------------------------------------------------------------------
.../package/alerts/alert_check_oozie_server.py | 246 +++++++++
.../services/OOZIE/package/files/oozieSmoke2.sh | 84 +++
.../files/prepareOozieHdfsDirectories.sh | 42 ++
.../OOZIE/package/files/wrap_ooziedb.sh | 31 ++
.../scripts/check_oozie_server_status.py | 38 ++
.../services/OOZIE/package/scripts/oozie.py | 549 +++++++++++++++++++
.../OOZIE/package/scripts/oozie_client.py | 76 +++
.../OOZIE/package/scripts/oozie_server.py | 159 ++++++
.../package/scripts/oozie_server_upgrade.py | 247 +++++++++
.../OOZIE/package/scripts/oozie_service.py | 186 +++++++
.../services/OOZIE/package/scripts/params.py | 39 ++
.../OOZIE/package/scripts/params_linux.py | 387 +++++++++++++
.../OOZIE/package/scripts/params_windows.py | 34 ++
.../OOZIE/package/scripts/service_check.py | 133 +++++
.../OOZIE/package/scripts/status_params.py | 65 +++
.../OOZIE/package/templates/adminusers.txt.j2 | 28 +
.../package/templates/oozie-log4j.properties.j2 | 93 ++++
.../OOZIE/package/templates/oozie.conf.j2 | 35 ++
.../package/templates/zkmigrator_jaas.conf.j2 | 26 +
19 files changed, 2498 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/alerts/alert_check_oozie_server.py
new file mode 100644
index 0000000..54eef18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/alerts/alert_check_oozie_server.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import os
+import re
+
+from resource_management.core import global_lock
+from resource_management.core.environment import Environment
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import stack_tools
+from ambari_commons.os_check import OSConst, OSCheck
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from urlparse import urlparse
+
+STACK_ROOT_PATTERN = "{{ stack_root }}"
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+if OSCheck.is_windows_family():
+ OOZIE_ENV_HTTPS_RE = r"set\s+OOZIE_HTTPS_PORT=(\d+)"
+else:
+ OOZIE_ENV_HTTPS_RE = r"export\s+OOZIE_HTTPS_PORT=(\d+)"
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
+SECURITY_ENABLED = '{{cluster-env/security_enabled}}'
+OOZIE_USER = '{{oozie-env/oozie_user}}'
+OOZIE_CONF_DIR = "{0}/current/oozie-server/conf".format(STACK_ROOT_PATTERN)
+OOZIE_CONF_DIR_LEGACY = '/etc/oozie/conf'
+OOZIE_HTTPS_PORT = '{{oozie-site/oozie.https.port}}'
+OOZIE_ENV_CONTENT = '{{oozie-env/content}}'
+
+USER_KEYTAB_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.keytab.file}}'
+USER_PRINCIPAL_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.kerberos.principal}}'
+USER_KEY = '{{oozie-env/oozie_user}}'
+
+# default keytab location
+USER_KEYTAB_SCRIPT_PARAM_KEY = 'default.oozie.keytab'
+USER_KEYTAB_DEFAULT = '/etc/security/keytabs/oozie.headless.keytab'
+
+# default user principal
+USER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.oozie.principal'
+USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
+
+# default user
+USER_DEFAULT = 'oozie'
+
+STACK_NAME_KEY = '{{cluster-env/stack_name}}'
+STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
+STACK_ROOT_DEFAULT = '/usr/hdp'
+
+class KerberosPropertiesNotFound(Exception): pass
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+ """
+ Returns a tuple of tokens in the format {{site/property}} that will be used
+ to build the dictionary passed into execute
+ """
+ return (OOZIE_URL_KEY,)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+ """
+ Returns a tuple of tokens in the format {{site/property}} that will be used
+ to build the dictionary passed into execute
+ """
+ return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
+ USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_check_command(oozie_url, host_name, configurations):
+ from resource_management.libraries.functions import reload_windows_env
+ reload_windows_env()
+ oozie_home = os.environ['OOZIE_HOME']
+ oozie_cmd = os.path.join(oozie_home, 'bin', 'oozie.cmd')
+ command = format("cmd /c {oozie_cmd} admin -oozie {oozie_url} -status")
+ return (command, None, None)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_check_command(oozie_url, host_name, configurations, parameters, only_kinit):
+ kerberos_env = None
+
+ user = USER_DEFAULT
+ if USER_KEY in configurations:
+ user = configurations[USER_KEY]
+
+ if is_security_enabled(configurations):
+ # defaults
+ user_keytab = USER_KEYTAB_DEFAULT
+ user_principal = USER_PRINCIPAL_DEFAULT
+
+ # check script params
+ if USER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+ user_principal = parameters[USER_PRINCIPAL_SCRIPT_PARAM_KEY]
+ user_principal = user_principal.replace('_HOST', host_name.lower())
+ if USER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+ user_keytab = parameters[USER_KEYTAB_SCRIPT_PARAM_KEY]
+
+ # check configurations last as they should always take precedence
+ if USER_PRINCIPAL_KEY in configurations:
+ user_principal = configurations[USER_PRINCIPAL_KEY]
+ user_principal = user_principal.replace('_HOST', host_name.lower())
+ if USER_KEYTAB_KEY in configurations:
+ user_keytab = configurations[USER_KEYTAB_KEY]
+
+ # Create the kerberos credentials cache (ccache) file and set it in the environment to use
+ # when executing curl
+ env = Environment.get_instance()
+ ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
+ kerberos_env = {'KRB5CCNAME': ccache_file}
+
+ # Get the configured Kerberos executable search paths, if any
+ kerberos_executable_search_paths = None
+ if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+ kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+
+ klist_path_local = get_klist_path(kerberos_executable_search_paths)
+ kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+ kinit_part_command = format("{kinit_path_local} -l 5m20s -c {ccache_file} -kt {user_keytab} {user_principal}; ")
+
+ # Determine if we need to kinit by testing to see if the relevant cache exists and has
+ # non-expired tickets. Tickets are marked to expire after 5 minutes to help reduce the number
+ # it kinits we do but recover quickly when keytabs are regenerated
+
+ if only_kinit:
+ kinit_command = kinit_part_command
+ else:
+ kinit_command = "{0} -s {1} || ".format(klist_path_local, ccache_file) + kinit_part_command
+
+ # prevent concurrent kinit
+ kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+ kinit_lock.acquire()
+ try:
+ Execute(kinit_command, environment=kerberos_env, user=user)
+ finally:
+ kinit_lock.release()
+
+ # Configure stack root
+ stack_root = STACK_ROOT_DEFAULT
+ if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
+ stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
+
+ # oozie configuration directory using a symlink
+ oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)
+ if not os.path.exists(oozie_config_directory):
+ oozie_config_directory = OOZIE_CONF_DIR_LEGACY
+
+ command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
+ oozie_config_directory, oozie_url)
+
+ return (command, kerberos_env, user)
+
+def execute(configurations={}, parameters={}, host_name=None):
+ """
+ Returns a tuple containing the result code and a pre-formatted result label
+
+ Keyword arguments:
+ configurations (dictionary): a mapping of configuration key to value
+ parameters (dictionary): a mapping of script parameter key to value
+ host_name (string): the name of this host where the alert is running
+ """
+
+ if configurations is None:
+ return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+ if not OOZIE_URL_KEY in configurations:
+ return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])
+
+ https_port = None
+ # try to get https port form oozie-env content
+ if OOZIE_ENV_CONTENT in configurations:
+ for line in configurations[OOZIE_ENV_CONTENT].splitlines():
+ result = re.match(OOZIE_ENV_HTTPS_RE, line)
+
+ if result is not None:
+ https_port = result.group(1)
+ # or from oozie-site.xml
+ if https_port is None and OOZIE_HTTPS_PORT in configurations:
+ https_port = configurations[OOZIE_HTTPS_PORT]
+
+ oozie_url = configurations[OOZIE_URL_KEY]
+
+ # construct proper url for https
+ if https_port is not None:
+ parsed_url = urlparse(oozie_url)
+ oozie_url = oozie_url.replace(parsed_url.scheme, "https")
+ if parsed_url.port is None:
+ oozie_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+ else:
+ oozie_url = oozie_url.replace(str(parsed_url.port), str(https_port))
+
+ # https will not work with localhost address, we need put fqdn
+ if https_port is None:
+ oozie_url = oozie_url.replace(urlparse(oozie_url).hostname, host_name)
+
+ (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, False)
+
+ # sometimes the real lifetime of the ticket is less than we have set (5m20s as of now),
+ # so I've added this double check with a re-kinit command to be sure that it's not a problem with the ticket lifetime
+ if is_security_enabled(configurations) and code == RESULT_CODE_CRITICAL:
+ (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, True)
+
+ return (code, msg)
+
+
+def get_check_result(oozie_url, host_name, configurations, parameters, only_kinit):
+ try:
+ command, env, user = get_check_command(oozie_url, host_name, configurations, parameters, only_kinit)
+ # execute the command
+ Execute(command, environment=env, user=user)
+
+ return (RESULT_CODE_OK, ["Successful connection to {0}".format(oozie_url)])
+ except KerberosPropertiesNotFound, ex:
+ return (RESULT_CODE_UNKNOWN, [str(ex)])
+ except Exception, ex:
+ return (RESULT_CODE_CRITICAL, [str(ex)])
+
+def is_security_enabled(configurations):
+ security_enabled = False
+ if SECURITY_ENABLED in configurations:
+ security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
+
+ return security_enabled
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/oozieSmoke2.sh
new file mode 100644
index 0000000..60716ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/oozieSmoke2.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
# Positional arguments supplied by the Oozie service-check wrapper.
export os_family=$1              # OS family of this host (e.g. redhat, ubuntu)
export oozie_lib_dir=$2          # Oozie installation/lib directory
export oozie_conf_dir=$3         # directory containing oozie-env.sh
export oozie_bin_dir=$4          # directory containing the oozie CLI
export oozie_server_url=$5       # Oozie server URL to submit the job to
export oozie_examples_dir=$6     # directory holding oozie-examples.tar.gz
export hadoop_conf_dir=$7        # Hadoop configuration directory
export hadoop_bin_dir=$8         # Hadoop binaries directory
export smoke_test_user=$9        # user the smoke-test job runs as
export job_name=${10}            # example app to run (e.g. map-reduce)
export security_enabled=${11}    # "True" when Kerberos is enabled
export smoke_user_keytab=${12}   # keytab of the smoke user (secure clusters)
export kinit_path_local=${13}    # path to the kinit binary
export smokeuser_principal=${14} # Kerberos principal of the smoke user
+
# Polls the Oozie server for the status of the given workflow job until it
# leaves the RUNNING state or the retry budget is exhausted.
#   $1 - Oozie workflow job id
#   $2 - maximum number of polls, 15 seconds apart (defaults to 10)
# Returns 0 when the job reached SUCCEEDED, 1 otherwise.
function checkOozieJobStatus {
  local job_id=$1
  local num_of_tries=$2
  #default num_of_tries to 10 if not present
  num_of_tries=${num_of_tries:-10}
  local i=0
  local rc=1
  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
  /var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"
  while [ $i -lt $num_of_tries ] ; do
    cmd_output=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
    # Clearing IFS keeps the unquoted expansion from being word-split, so the
    # command output is echoed with its original spacing.
    (IFS='';echo $cmd_output)
    # Extract the value of the "Status:" line, stripping all spaces.
    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
    echo "workflow_status=$act_status"
    if [ "RUNNING" == "$act_status" ]; then
      #increment the counter and get the status again after waiting for 15 secs
      sleep 15
      (( i++ ))
    elif [ "SUCCEEDED" == "$act_status" ]; then
      rc=0;
      break;
    else
      rc=1
      break;
    fi
  done
  return $rc
}
+
export OOZIE_EXIT_CODE=0
export OOZIE_SERVER=$oozie_server_url

cd $oozie_examples_dir

# On secure clusters a Kerberos ticket must be obtained for the smoke user
# before talking to the Oozie server; otherwise no kinit prefix is needed.
if [[ $security_enabled == "True" ]]; then
  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smokeuser_principal}; "
else
  kinitcmd=""
fi

# Submit the example workflow as the smoke-test user; the auth token cache is
# disabled so a stale token cannot mask Kerberos problems.
cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $oozie_examples_dir/examples/apps/${job_name}/job.properties -run"
echo $cmd
# The submit output contains a line of the form "job: <job-id>".
job_info=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
job_id="`echo $job_info | cut -d':' -f2`"
# Poll the workflow (up to 15 tries, 15 seconds apart) and exit with its result.
checkOozieJobStatus "$job_id" 15
OOZIE_EXIT_CODE="$?"
exit $OOZIE_EXIT_CODE
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh
new file mode 100644
index 0000000..f2bee2d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
# Positional arguments supplied by the Oozie service-check wrapper.
export oozie_conf_dir=$1     # Oozie configuration directory
export oozie_examples_dir=$2 # directory containing oozie-examples.tar.gz
export hadoop_conf_dir=$3    # Hadoop configuration directory
export JOBTRACKER=$4         # JobTracker / ResourceManager address
export NAMENODE=$5           # NameNode URI (hdfs://host:port)
export QUEUE=$6              # YARN/MR queue to submit the example job to
export JOB_NAME=$7           # example application name (e.g. map-reduce)

cd $oozie_examples_dir

# Unpack the bundled examples and make them world-readable so the smoke user
# can access them.
/var/lib/ambari-agent/ambari-sudo.sh tar -zxf oozie-examples.tar.gz
/var/lib/ambari-agent/ambari-sudo.sh chmod -R o+rx examples

# Rewrite the example job.properties to point at this cluster, covering the
# default NameNode/JobTracker ports used by different Hadoop versions.
/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g" examples/apps/$JOB_NAME/job.properties
/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g" examples/apps/$JOB_NAME/job.properties
/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/$JOB_NAME/job.properties
/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|queueName=default|queueName=$QUEUE|g" examples/apps/$JOB_NAME/job.properties
/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/$JOB_NAME/job.properties
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..36576b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
# Run ooziedb.sh, capturing combined stdout/stderr so we can inspect it.
OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
EC=$?
# Quote the expansion so the tool's multi-line output is reproduced verbatim;
# an unquoted $OUT would be word-split onto one line and glob-expanded.
echo "$OUT"
# Treat "DB schema exists" as success: the database was already initialized.
GRVAR=`echo "$OUT" | grep -o "java.lang.Exception: DB schema exists"`
if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
then
  exit 0
else
  exit $EC
fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/check_oozie_server_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/check_oozie_server_status.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/check_oozie_server_status.py
new file mode 100644
index 0000000..7c69779
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/check_oozie_server_status.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def check_oozie_server_status():
  """
  Windows implementation: delegates to check_windows_service_status for the
  Oozie server Windows service (name taken from status_params).
  """
  import status_params
  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status

  check_windows_service_status(status_params.oozie_server_win_service_name)
+
+
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def check_oozie_server_status():
  """
  Default (Linux) implementation: checks the Oozie server process via its pid
  file (path taken from status_params).
  """
  import status_params
  from resource_management.libraries.functions.check_process_status import check_process_status

  check_process_status(status_params.pid_file)
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py
new file mode 100644
index 0000000..aa5bc30
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie.py
@@ -0,0 +1,549 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+import re
+
+# Resource Management Imports
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import Directory, Execute, File
+from resource_management.core.source import DownloadSource
+from resource_management.core.source import InlineTemplate
+from resource_management.core.source import Template
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.oozie_prepare_war import prepare_war
+from resource_management.libraries.functions.copy_tarball import get_current_version
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.security_commons import update_credential_provider_path
+from resource_management.core.resources.packaging import Package
+from resource_management.core.shell import as_user, as_sudo, call
+from resource_management.core.exceptions import Fail
+
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from ambari_commons.constants import SERVICE, UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from resource_management.libraries.functions.constants import Direction
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from ambari_commons.inet_utils import download_file
+
+from resource_management.core import Logger
+
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def oozie(is_server=False, upgrade_type=None):
  """
  Windows implementation: writes oozie-site.xml and oozie-env.cmd, creates the
  Oozie temp directory and, when configuring the server, re-applies the
  service logon credentials and stages the SQL Server JDBC driver
  (sqljdbc4.jar) into each location the Oozie web application loads it from.

  :param is_server: True when configuring the Oozie server (vs. a client).
  :param upgrade_type: unused on Windows; kept for interface parity with the
                       default implementation.
  """
  import params

  from status_params import oozie_server_win_service_name

  XmlConfig("oozie-site.xml",
            conf_dir=params.oozie_conf_dir,
            configurations=params.config['configurations']['oozie-site'],
            owner=params.oozie_user,
            mode='f',
            configuration_attributes=params.config['configuration_attributes']['oozie-site']
  )

  File(os.path.join(params.oozie_conf_dir, "oozie-env.cmd"),
       owner=params.oozie_user,
       content=InlineTemplate(params.oozie_env_cmd_template)
  )

  Directory(params.oozie_tmp_dir,
            owner=params.oozie_user,
            create_parents = True,
  )

  if is_server:
    # Manually overriding service logon user & password set by the installation package
    ServiceConfig(oozie_server_win_service_name,
                  action="change_user",
                  username = params.oozie_user,
                  password = Script.get_password(params.oozie_user))

    # Stage sqljdbc4.jar everywhere the server/web app looks for it.
    download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                  os.path.join(params.oozie_root, "extra_libs", "sqljdbc4.jar")
    )
    webapps_sqljdbc_path = os.path.join(params.oozie_home, "oozie-server", "webapps", "oozie", "WEB-INF", "lib", "sqljdbc4.jar")
    if os.path.isfile(webapps_sqljdbc_path):
      download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                    webapps_sqljdbc_path
      )
    download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                  os.path.join(params.oozie_home, "share", "lib", "oozie", "sqljdbc4.jar")
    )
    download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                  os.path.join(params.oozie_home, "temp", "WEB-INF", "lib", "sqljdbc4.jar")
    )
+
+# TODO: see if see can remove this
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def oozie(is_server=False, upgrade_type=None):
+ import params
+
+ if is_server:
+ params.HdfsResource(params.oozie_hdfs_user_dir,
+ type="directory",
+ action="create_on_execute",
+ owner=params.oozie_user,
+ mode=params.oozie_hdfs_user_mode
+ )
+ params.HdfsResource(None, action="execute")
+ Directory(params.conf_dir,
+ create_parents = True,
+ owner = params.oozie_user,
+ group = params.user_group
+ )
+
+ params.oozie_site = update_credential_provider_path(params.oozie_site,
+ 'oozie-site',
+ os.path.join(params.conf_dir, 'oozie-site.jceks'),
+ params.oozie_user,
+ params.user_group
+ )
+
+ XmlConfig("oozie-site.xml",
+ conf_dir = params.conf_dir,
+ configurations = params.oozie_site,
+ configuration_attributes=params.config['configuration_attributes']['oozie-site'],
+ owner = params.oozie_user,
+ group = params.user_group,
+ mode = 0664
+ )
+ File(format("{conf_dir}/oozie-env.sh"),
+ owner=params.oozie_user,
+ content=InlineTemplate(params.oozie_env_sh_template),
+ group=params.user_group,
+ )
+
+ # On some OS this folder could be not exists, so we will create it before pushing there files
+ Directory(params.limits_conf_dir,
+ create_parents=True,
+ owner='root',
+ group='root'
+ )
+
+ File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
+ owner='root',
+ group='root',
+ mode=0644,
+ content=Template("oozie.conf.j2")
+ )
+
+ if (params.log4j_props != None):
+ File(format("{params.conf_dir}/oozie-log4j.properties"),
+ mode=0644,
+ group=params.user_group,
+ owner=params.oozie_user,
+ content=InlineTemplate(params.log4j_props)
+ )
+ elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
+ File(format("{params.conf_dir}/oozie-log4j.properties"),
+ mode=0644,
+ group=params.user_group,
+ owner=params.oozie_user
+ )
+
+ if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
+ File(format("{params.conf_dir}/adminusers.txt"),
+ mode=0644,
+ group=params.user_group,
+ owner=params.oozie_user,
+ content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users)
+ )
+ else:
+ File ( format("{params.conf_dir}/adminusers.txt"),
+ owner = params.oozie_user,
+ group = params.user_group
+ )
+
+ if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+ params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
+ params.jdbc_driver_name == "org.postgresql.Driver" or \
+ params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+ File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+ content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+ )
+ pass
+
+ oozie_ownership()
+
+ if is_server:
+ oozie_server_specific(upgrade_type)
+
def oozie_ownership():
  """
  Ensures the Oozie user/group own the key configuration files and the
  action-conf directory under the Oozie configuration directory.
  """
  import params

  # Plain files directly under {conf_dir} that Oozie must be able to read.
  for conf_file in ("hadoop-config.xml", "oozie-default.xml"):
    File(format("{conf_dir}/" + conf_file),
         owner=params.oozie_user,
         group=params.user_group
    )

  Directory(format("{conf_dir}/action-conf"),
            owner=params.oozie_user,
            group=params.user_group
  )

  File(format("{conf_dir}/action-conf/hive.xml"),
       owner=params.oozie_user,
       group=params.user_group
  )
+
def get_oozie_ext_zip_source_paths(upgrade_type, params):
  """
  Builds an ordered list of candidate locations for the Oozie ext JS zip file
  in the source stack.

  :param upgrade_type: None when not in the middle of a stack upgrade.
  :param params: expected to expose ext_js_path, upgrade_direction,
                 source_stack_name and ext_js_file.
  :return: list of source paths to try, in priority order.
  """
  # Default candidate: /usr/share/$TARGETSTACK-oozie/<ext file>.
  preferred = params.ext_js_path
  # During an upgrade, prefer the source stack's share location instead
  # (location used by HDP and BigInsights 4.2.5).
  if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE:
    preferred = "/usr/share/" + params.source_stack_name.upper() + "-oozie/" + params.ext_js_file

  # Always fall back to the location used by BigInsights 4.2.0 when migrating
  # to another stack.
  return [preferred, "/var/lib/oozie/" + params.ext_js_file]
+
def oozie_server_specific(upgrade_type):
  """
  Performs Oozie-server-only setup: clears a stale pid file, creates the
  server directories, unpacks the sharelib, stages the ext JS zip and
  database JARs, copies Falcon/LZO extensions when present, (re)builds the
  Oozie WAR, and generates hive-site/tez-site plus the Atlas hook file on
  stacks that support them.

  :param upgrade_type: stack upgrade type or None; used to choose the ext
                       zip source locations during an upgrade.
  """
  import params

  # Succeeds only when an Oozie server process is already running; used as a
  # guard so a live server's files are never clobbered.
  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)

  # Remove a stale pid file when no server is running.
  File(params.pid_file,
    action="delete",
    not_if=no_op_test
  )

  oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
  Directory( oozie_server_directories,
    owner = params.oozie_user,
    group = params.user_group,
    mode = 0755,
    create_parents = True,
    cd_access="a",
  )

  Directory(params.oozie_libext_dir,
    create_parents = True,
  )

  # The hashcode file plus an unpacked share directory mark that the sharelib
  # has already been extracted, so the expensive untar can be skipped.
  hashcode_file = format("{oozie_home}/.hashcode")
  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")

  untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)

  Execute( untar_sharelib,    # time-expensive
    not_if  = format("{no_op_test} || {skip_recreate_sharelib}"),
    sudo = True,
  )

  configure_cmds = []
  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
  source_ext_zip_paths = get_oozie_ext_zip_source_paths(upgrade_type, params)

  # Copy the first oozie ext-2.2.zip file that is found.
  # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
  if source_ext_zip_paths is not None:
    for source_ext_zip_path in source_ext_zip_paths:
      if os.path.isfile(source_ext_zip_path):
        configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
        configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))

        Execute(configure_cmds,
          not_if=no_op_test,
          sudo=True,
        )
        break


  Directory(params.oozie_webapps_conf_dir,
    owner = params.oozie_user,
    group = params.user_group,
    recursive_ownership = True,
    recursion_follow_links = True,
  )

  # download the database JAR
  download_database_library_if_needed()

  # Falcon EL extension jars, when a Falcon host exists in the cluster.
  if params.has_falcon_host:
    Execute(format('{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
      not_if  = no_op_test)

    Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
      not_if  = no_op_test)

  # Install LZO packages and stage the hadoop-lzo jar when LZO is enabled.
  if params.lzo_enabled and len(params.all_lzo_packages) > 0:
    Package(params.all_lzo_packages,
            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
            retry_count=params.agent_stack_retry_count)
    Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
      not_if  = no_op_test,
    )

  # (Re)build the Oozie WAR with the staged extensions.
  prepare_war(params)

  # Touch the marker so the sharelib extraction is skipped on later runs.
  File(hashcode_file,
       mode = 0644,
  )

  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_CREATE_HIVE_TEZ_CONFIGS, params.stack_version_formatted):
    # Create hive-site and tez-site configs for oozie
    Directory(params.hive_conf_dir,
        create_parents = True,
        owner = params.oozie_user,
        group = params.user_group
    )
    if 'hive-site' in params.config['configurations']:
      hive_site_config = update_credential_provider_path(params.config['configurations']['hive-site'],
                                                         'hive-site',
                                                         os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
                                                         params.oozie_user,
                                                         params.user_group
                                                         )
      XmlConfig("hive-site.xml",
        conf_dir=params.hive_conf_dir,
        configurations=hive_site_config,
        configuration_attributes=params.config['configuration_attributes']['hive-site'],
        owner=params.oozie_user,
        group=params.user_group,
        mode=0644
      )
    if 'tez-site' in params.config['configurations']:
      XmlConfig( "tez-site.xml",
        conf_dir = params.hive_conf_dir,
        configurations = params.config['configurations']['tez-site'],
        configuration_attributes=params.config['configuration_attributes']['tez-site'],
        owner = params.oozie_user,
        group = params.user_group,
        mode = 0664
      )

    # If Atlas is also installed, need to generate Atlas Hive hook (hive-atlas-application.properties file) in directory
    # {stack_root}/{current_version}/atlas/hook/hive/
    # Because this is a .properties file instead of an xml file, it will not be read automatically by Oozie.
    # However, should still save the file on this host so that can upload it to the Oozie Sharelib in DFS.
    if has_atlas_in_cluster():
      atlas_hook_filepath = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
      Logger.info("Has atlas in cluster, will save Atlas Hive hook into location %s" % str(atlas_hook_filepath))
      setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.oozie_user, params.user_group)

  Directory(params.oozie_server_dir,
    owner = params.oozie_user,
    group = params.user_group,
    recursive_ownership = True,
  )
  if params.security_enabled:
    # JAAS config used by the ZooKeeper migrator utility on secure clusters.
    File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
         owner=params.oozie_user,
         group=params.user_group,
         content=Template("zkmigrator_jaas.conf.j2")
    )
+
+def __parse_sharelib_from_output(output):
+ """
+ Return the parent directory of the first path from the output of the "oozie admin -shareliblist command $comp"
+ Output will match pattern like:
+
+ Potential errors
+ [Available ShareLib]
+ hive
+ hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file1.jar
+ hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file2.jar
+ """
+ if output is not None:
+ pattern = re.compile(r"\[Available ShareLib\]\n\S*?\n(.*share.*)", re.IGNORECASE)
+ m = pattern.search(output)
+ if m and len(m.groups()) == 1:
+ jar_path = m.group(1)
+ # Remove leading/trailing spaces and get the containing directory
+ sharelib_dir = os.path.dirname(jar_path.strip())
+ return sharelib_dir
+ return None
+
+def copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type=None, upgrade_direction=None):
+ """
+ If the Atlas Hive Hook direcotry is present, Atlas is installed, and this is the first Oozie Server,
+ then copy the entire contents of that directory to the Oozie Sharelib in DFS, e.g.,
+ /usr/$stack/$current_version/atlas/hook/hive/ -> hdfs:///user/oozie/share/lib/lib_$timetamp/hive
+
+ :param upgrade_type: If in the middle of a stack upgrade, the type as UPGRADE_TYPE_ROLLING or UPGRADE_TYPE_NON_ROLLING
+ :param upgrade_direction: If in the middle of a stack upgrade, the direction as Direction.UPGRADE or Direction.DOWNGRADE.
+ """
+ import params
+
+ # Calculate the effective version since this code can also be called during EU/RU in the upgrade direction.
+ effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
+ if not check_stack_feature(StackFeature.ATLAS_HOOK_SUPPORT, effective_version):
+ return
+
+ # Important that oozie_server_hostnames is sorted by name so that this only runs on a single Oozie server.
+ if not (len(params.oozie_server_hostnames) > 0 and params.hostname == params.oozie_server_hostnames[0]):
+ Logger.debug("Will not attempt to copy Atlas Hive hook to DFS since this is not the first Oozie Server "
+ "sorted by hostname.")
+ return
+
+ if not has_atlas_in_cluster():
+ Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since Atlas is not installed on the cluster.")
+ return
+
+ if upgrade_type is not None and upgrade_direction == Direction.DOWNGRADE:
+ Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since in the middle of Rolling/Express upgrade "
+ "and performing a Downgrade.")
+ return
+
+ current_version = get_current_version()
+ atlas_hive_hook_dir = format("{stack_root}/{current_version}/atlas/hook/hive/")
+ if not os.path.exists(atlas_hive_hook_dir):
+ Logger.error(format("ERROR. Atlas is installed in cluster but this Oozie server doesn't "
+ "contain directory {atlas_hive_hook_dir}"))
+ return
+
+ atlas_hive_hook_impl_dir = os.path.join(atlas_hive_hook_dir, "atlas-hive-plugin-impl")
+
+ num_files = len([name for name in os.listdir(atlas_hive_hook_impl_dir) if os.path.exists(os.path.join(atlas_hive_hook_impl_dir, name))])
+ Logger.info("Found %d files/directories inside Atlas Hive hook impl directory %s"% (num_files, atlas_hive_hook_impl_dir))
+
+ # This can return over 100 files, so take the first 5 lines after "Available ShareLib"
+ # Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
+ command = format(r'source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -shareliblist hive | grep "\[Available ShareLib\]" -A 5')
+
+ try:
+ code, out = call(command, user=params.oozie_user, tries=10, try_sleep=5, logoutput=True)
+ if code == 0 and out is not None:
+ hive_sharelib_dir = __parse_sharelib_from_output(out)
+
+ if hive_sharelib_dir is None:
+ raise Fail("Could not parse Hive sharelib from output.")
+
+ Logger.info("Parsed Hive sharelib = %s and will attempt to copy/replace %d files to it from %s" %
+ (hive_sharelib_dir, num_files, atlas_hive_hook_impl_dir))
+
+ params.HdfsResource(hive_sharelib_dir,
+ type="directory",
+ action="create_on_execute",
+ source=atlas_hive_hook_impl_dir,
+ user=params.hdfs_user,
+ owner=params.oozie_user,
+ group=params.hdfs_user,
+ mode=0755,
+ recursive_chown=True,
+ recursive_chmod=True,
+ replace_existing_files=True
+ )
+
+ Logger.info("Copying Atlas Hive hook properties file to Oozie Sharelib in DFS.")
+ atlas_hook_filepath_source = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
+ atlas_hook_file_path_dest_in_dfs = os.path.join(hive_sharelib_dir, params.atlas_hook_filename)
+ params.HdfsResource(atlas_hook_file_path_dest_in_dfs,
+ type="file",
+ source=atlas_hook_filepath_source,
+ action="create_on_execute",
+ owner=params.oozie_user,
+ group=params.hdfs_user,
+ mode=0755,
+ replace_existing_files=True
+ )
+ params.HdfsResource(None, action="execute")
+
+ # Update the sharelib after making any changes
+ # Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
+ command = format("source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -sharelibupdate")
+ code, out = call(command, user=params.oozie_user, tries=5, try_sleep=5, logoutput=True)
+ if code == 0 and out is not None:
+ Logger.info("Successfully updated the Oozie ShareLib")
+ else:
+ raise Exception("Could not update the Oozie ShareLib after uploading the Atlas Hive hook directory to DFS. "
+ "Code: %s" % str(code))
+ else:
+ raise Exception("Code is non-zero or output is empty. Code: %s" % str(code))
+ except Fail, e:
+ Logger.error("Failed to get Hive sharelib directory in DFS. %s" % str(e))
+
+
def download_database_library_if_needed(target_directory = None):
  """
  Downloads the library to use when connecting to the Oozie database, if
  necessary. The library will be downloaded to 'params.target' unless
  otherwise specified.
  :param target_directory: the location where the database library will be
  downloaded to.
  :return: None
  """
  import params
  # Drivers whose connector jar must be provisioned by Ambari.
  jdbc_drivers = ["com.mysql.jdbc.Driver",
    "com.microsoft.sqlserver.jdbc.SQLServerDriver",
    "oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]

  # check to see if the JDBC driver name is in the list of ones that need to
  # be downloaded
  if params.jdbc_driver_name not in jdbc_drivers or not params.jdbc_driver_jar:
    return

  # Remove a jar left behind by a previously configured (different) driver.
  if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
    File(params.previous_jdbc_jar, action='delete')

  # if the target directory is not specified
  if target_directory is None:
    target_jar_with_directory = params.target
  else:
    # create the full path using the supplied target directory and the JDBC JAR
    target_jar_with_directory = target_directory + os.path.sep + params.jdbc_driver_jar

  if not os.path.exists(target_jar_with_directory):
    File(params.downloaded_custom_connector,
         content = DownloadSource(params.driver_curl_source))

    if params.sqla_db_used:
      # SQL Anywhere ships as a tarball containing both jars and native
      # libraries; unpack it and copy each piece into place.
      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)

      Execute(untar_sqla_type2_driver, sudo = True)

      Execute(format("yes | {sudo} cp {jars_path_in_archive} {oozie_libext_dir}"))

      Directory(params.jdbc_libs_dir,
                create_parents = True)

      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))

      Execute(format("{sudo} chown -R {oozie_user}:{user_group} {oozie_libext_dir}/*"))

    else:
      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
              path=["/bin", "/usr/bin/"],
              sudo = True)

    File(target_jar_with_directory, owner = params.oozie_user,
         group = params.user_group)
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_client.py
new file mode 100644
index 0000000..9bbca9f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_client.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from oozie import oozie
+from oozie_service import oozie_service
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+# Ambari command script for the Oozie client component (config-only, no daemon).
+class OozieClient(Script):
+
+ # Component name consumed by the stack-selector tooling.
+ def get_component_name(self):
+ return "oozie-client"
+
+ def install(self, env):
+ self.install_packages(env)
+ self.configure(env)
+
+
+ # Writes out client-side Oozie configuration files; no server setup is done.
+ def configure(self, env):
+ import params
+ env.set_params(params)
+
+ oozie(is_server=False)
+
+ # Clients have no daemon; Ambari expects this exception instead of a status.
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+
+ # Switches conf/stack symlinks to the target version before restart during upgrade.
+ def pre_upgrade_restart(self, env, upgrade_type=None):
+ import params
+ env.set_params(params)
+
+ # this function should not execute if the version can't be determined or
+ # the stack does not support rolling upgrade
+ if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+ return
+
+ Logger.info("Executing Oozie Client Stack Upgrade pre-restart")
+ conf_select.select(params.stack_name, "oozie", params.version)
+ stack_select.select("oozie-client", params.version)
+
+ # We substitute some configs (oozie.authentication.kerberos.principal) before generation (see oozie.py and params.py).
+ # This function returns changed configs (it's used for config generation before config download)
+ def generate_configs_get_xml_file_content(self, filename, dictionary):
+ if dictionary == 'oozie-site':
+ import params
+ config = self.get_config()
+ return {'configurations': params.oozie_site,
+ 'configuration_attributes': config['configuration_attributes'][dictionary]}
+ else:
+ return super(OozieClient, self).generate_configs_get_xml_file_content(filename, dictionary)
+
+if __name__ == "__main__":
+ OozieClient().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server.py
new file mode 100644
index 0000000..b3a8643
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core import Logger
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+
+from oozie import oozie
+from oozie_service import oozie_service
+from oozie_server_upgrade import OozieUpgrade
+
+from check_oozie_server_status import check_oozie_server_status
+from resource_management.core.resources.zkmigrator import ZkMigrator
+
+# Ambari command script for the Oozie server daemon; OS-specific behavior is
+# provided by the @OsFamilyImpl subclasses below.
+class OozieServer(Script):
+
+ # Component name consumed by the stack-selector tooling.
+ def get_component_name(self):
+ return "oozie-server"
+
+ def install(self, env):
+ self.install_packages(env)
+
+ def configure(self, env, upgrade_type=None):
+ import params
+
+ # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
+ if upgrade_type is None:
+ upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+
+ # During an upgrade, flip symlinks/configs to the target version before rendering configs.
+ if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
+ Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
+ if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+ # In order for the "<stack-root>/current/oozie-<client/server>" point to the new version of
+ # oozie, we need to create the symlinks both for server and client.
+ # This is required as both need to be pointing to new installed oozie version.
+
+ # Sets the symlink : eg: <stack-root>/current/oozie-client -> <stack-root>/a.b.c.d-<version>/oozie
+ stack_select.select("oozie-client", params.version)
+ # Sets the symlink : eg: <stack-root>/current/oozie-server -> <stack-root>/a.b.c.d-<version>/oozie
+ stack_select.select("oozie-server", params.version)
+
+ if params.version and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.version):
+ conf_select.select(params.stack_name, "oozie", params.version)
+
+ env.set_params(params)
+ oozie(is_server=True, upgrade_type=upgrade_type)
+
+ def start(self, env, upgrade_type=None):
+ import params
+ env.set_params(params)
+
+ self.configure(env)
+
+ # preparing the WAR file must run after configure since configure writes out
+ # oozie-env.sh which is needed to have the right environment directories setup!
+ if upgrade_type is not None:
+ OozieUpgrade.prepare_warfile()
+
+ oozie_service(action='start', upgrade_type=upgrade_type)
+
+ def stop(self, env, upgrade_type=None):
+ import params
+ env.set_params(params)
+ oozie_service(action='stop', upgrade_type=upgrade_type)
+
+
+ # Raises if the server PID check fails; see check_oozie_server_status.
+ def status(self, env):
+ import status_params
+ env.set_params(status_params)
+ check_oozie_server_status()
+
+
+# Linux (default OS family) implementation of the Oozie server commands.
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServerDefault(OozieServer):
+
+ def pre_upgrade_restart(self, env, upgrade_type=None):
+ """
+ Performs the tasks that should be done before an upgrade of oozie. This includes:
+ - backing up configurations
+ - running <stack-selector-tool> and <conf-selector-tool>
+ - restoring configurations
+ - preparing the libext directory
+ :param env:
+ :return:
+ """
+ import params
+ env.set_params(params)
+
+ # this function should not execute if the version can't be determined or
+ # the stack does not support rolling upgrade
+ if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+ return
+
+ Logger.info("Executing Oozie Server Stack Upgrade pre-restart")
+
+ # NOTE(review): this check is redundant -- the guard above already verified
+ # both conditions, so this branch is always taken when reached.
+ if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+ conf_select.select(params.stack_name, "oozie", params.version)
+ stack_select.select("oozie-server", params.version)
+
+ OozieUpgrade.prepare_libext_directory(upgrade_type=upgrade_type)
+
+ # Reverts the Oozie znode ACLs to world-open when security is being disabled.
+ def disable_security(self, env):
+ import params
+ if not params.stack_supports_zk_security:
+ Logger.info("Stack doesn't support zookeeper security")
+ return
+ if not params.zk_connection_string:
+ Logger.info("No zookeeper connection string. Skipping reverting ACL")
+ return
+ zkmigrator = ZkMigrator(params.zk_connection_string, params.java_exec, params.java64_home, params.jaas_file, params.oozie_user)
+ zkmigrator.set_acls(params.zk_namespace if params.zk_namespace.startswith('/') else '/' + params.zk_namespace, 'world:anyone:crdwa')
+
+ def get_log_folder(self):
+ import params
+ return params.oozie_log_dir
+
+ def get_user(self):
+ import params
+ return params.oozie_user
+
+
+# Windows implementation: the base-class behavior plus the Windows-specific
+# oozie_service() dispatch (see oozie_service.py) is sufficient, so no overrides.
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServerWindows(OozieServer):
+ pass
+
+if __name__ == "__main__":
+ OozieServer().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server_upgrade.py
new file mode 100644
index 0000000..eb57c22
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_server_upgrade.py
@@ -0,0 +1,247 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import glob
+import os
+import shutil
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.oozie_prepare_war import prepare_war
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+import oozie
+
+BACKUP_TEMP_DIR = "oozie-upgrade-backup"
+BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
+
+# Upgrade helper invoked by the upgrade orchestration; the static methods are
+# also called directly from OozieServerDefault.pre_upgrade_restart / start.
+class OozieUpgrade(Script):
+
+ @staticmethod
+ def prepare_libext_directory(upgrade_type=None):
+ """
+ Performs the following actions on libext:
+ - creates <stack-root>/current/oozie/libext and recursively
+ - set 777 permissions on it and its parents.
+ - downloads JDBC driver JAR if needed
+ - copies Falcon JAR for the Oozie WAR if needed
+ """
+ import params
+
+ # some stack versions don't need the lzo compression libraries
+ target_version_needs_compression_libraries = params.version and check_stack_feature(StackFeature.LZO, params.version)
+
+ # ensure the directory exists
+ Directory(params.oozie_libext_dir, mode = 0777)
+
+ # get all hadooplzo* JAR files
+ # <stack-selector-tool> set hadoop-client has not run yet, therefore we cannot use
+ # <stack-root>/current/hadoop-client ; we must use params.version directly
+ # however, this only works when upgrading beyond 2.2.0.0; don't do this
+ # for downgrade to 2.2.0.0 since hadoop-lzo will not be present
+ # This can also be called during a Downgrade.
+ # When a version is Installed, it is responsible for downloading the hadoop-lzo packages
+ # if lzo is enabled.
+ if params.lzo_enabled and (params.upgrade_direction == Direction.UPGRADE or target_version_needs_compression_libraries):
+ hadoop_lzo_pattern = 'hadoop-lzo*.jar'
+ hadoop_client_new_lib_dir = format("{stack_root}/{version}/hadoop/lib")
+
+ files = glob.iglob(os.path.join(hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+ # NOTE(review): glob.iglob returns an iterator, which is always truthy, so
+ # this emptiness check can never fire; the files_copied check below is the
+ # one that actually detects the no-match case.
+ if not files:
+ raise Fail("There are no files at {0} matching {1}".format(
+ hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+ # copy files into libext
+ files_copied = False
+ for file in files:
+ if os.path.isfile(file):
+ Logger.info("Copying {0} to {1}".format(str(file), params.oozie_libext_dir))
+ shutil.copy2(file, params.oozie_libext_dir)
+ files_copied = True
+
+ if not files_copied:
+ raise Fail("There are no files at {0} matching {1}".format(
+ hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+ # something like <stack-root>/current/oozie-server/libext/ext-2.2.zip
+ oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, params.ext_js_file)
+
+ # Copy ext ZIP to libext dir
+ # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
+ source_ext_zip_paths = oozie.get_oozie_ext_zip_source_paths(upgrade_type, params)
+
+ found_at_least_one_oozie_ext_file = False
+
+ # Copy the first oozie ext-2.2.zip file that is found.
+ # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
+ if source_ext_zip_paths is not None:
+ for source_ext_zip_path in source_ext_zip_paths:
+ if os.path.isfile(source_ext_zip_path):
+ found_at_least_one_oozie_ext_file = True
+ Logger.info("Copying {0} to {1}".format(source_ext_zip_path, params.oozie_libext_dir))
+ Execute(("cp", source_ext_zip_path, params.oozie_libext_dir), sudo=True)
+ Execute(("chown", format("{oozie_user}:{user_group}"), oozie_ext_zip_target_path), sudo=True)
+ File(oozie_ext_zip_target_path,
+ mode=0644
+ )
+ break
+
+ if not found_at_least_one_oozie_ext_file:
+ raise Fail("Unable to find any Oozie source extension files from the following paths {0}".format(source_ext_zip_paths))
+
+ # Redownload jdbc driver to a new current location
+ oozie.download_database_library_if_needed()
+
+ # get the upgrade version in the event that it's needed
+ upgrade_stack = stack_select._get_upgrade_stack()
+ if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+ raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+ stack_version = upgrade_stack[1]
+
+ # copy the Falcon JAR if needed; falcon has not upgraded yet, so we must
+ # use the versioned falcon directory
+ if params.has_falcon_host:
+ versioned_falcon_jar_directory = "{0}/{1}/falcon/oozie/ext/falcon-oozie-el-extension-*.jar".format(params.stack_root, stack_version)
+ Logger.info("Copying {0} to {1}".format(versioned_falcon_jar_directory, params.oozie_libext_dir))
+
+ Execute(format('{sudo} cp {versioned_falcon_jar_directory} {oozie_libext_dir}'))
+ Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'))
+
+
+ @staticmethod
+ def prepare_warfile():
+ """
+ Invokes the 'prepare-war' command in Oozie in order to create the WAR.
+ The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
+ outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
+ both of these environment variables must point to the upgraded oozie-server path and
+ not oozie-client since it was not yet updated.
+
+ This method will also perform a kinit if necessary.
+ :return:
+ """
+ import params
+
+ # get the kerberos token if necessary to execute commands as oozie
+ if params.security_enabled:
+ oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+ command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+ Execute(command, user=params.oozie_user, logoutput=True)
+
+ prepare_war(params)
+
+
+ def upgrade_oozie_database_and_sharelib(self, env):
+ """
+ Performs the creation and upload of the sharelib and the upgrade of the
+ database. This method will also perform a kinit if necessary.
+ It is run before the upgrade of oozie begins exactly once as part of the
+ upgrade orchestration.
+
+ Since this runs before the upgrade has occurred, it should not use any
+ "current" directories since they will still be pointing to the older
+ version of Oozie. Instead, it should use versioned directories to ensure
+ that the commands running are from the oozie version about to be upgraded to.
+ :return:
+ """
+ import params
+ env.set_params(params)
+
+ Logger.info("Will upgrade the Oozie database")
+
+ # get the kerberos token if necessary to execute commands as oozie
+ if params.security_enabled:
+ oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+ command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+ Execute(command, user=params.oozie_user, logoutput=True)
+
+ upgrade_stack = stack_select._get_upgrade_stack()
+ if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+ raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+ stack_version = upgrade_stack[1]
+
+ # upgrade oozie DB
+ Logger.info(format('Upgrading the Oozie database, using version {stack_version}'))
+
+ # the database upgrade requires the db driver JAR, but since we have
+ # not yet run <stack-selector-tool> to upgrade the current points, we have to use
+ # the versioned libext directory as the location of the JDBC JAR
+ versioned_libext_dir = "{0}/{1}/oozie/libext".format(params.stack_root, stack_version)
+ oozie.download_database_library_if_needed(target_directory=versioned_libext_dir)
+
+ database_upgrade_command = "{0}/{1}/oozie/bin/ooziedb.sh upgrade -run".format(params.stack_root, stack_version)
+ Execute(database_upgrade_command, user=params.oozie_user, logoutput=True)
+
+ # install new sharelib to HDFS
+ self.create_sharelib(env)
+
+
+ def create_sharelib(self, env):
+ """
+ Performs the creation and upload of the sharelib.
+ This method will also perform a kinit if necessary.
+ It is run before the upgrade of oozie begins exactly once as part of the
+ upgrade orchestration.
+
+ Since this runs before the upgrade has occurred, it should not use any
+ "current" directories since they will still be pointing to the older
+ version of Oozie. Instead, it should use versioned directories to ensure
+ that the commands running are from the oozie version about to be upgraded to.
+ :param env:
+ :return:
+ """
+ import params
+ env.set_params(params)
+
+ Logger.info('Creating a new sharelib and uploading it to HDFS...')
+
+ # ensure the oozie directory exists for the sharelib
+ params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+ action = "create_on_execute",
+ type = "directory",
+ owner = "oozie",
+ group = "hadoop",
+ mode = 0755,
+ recursive_chmod = True)
+
+ params.HdfsResource(None, action = "execute")
+
+ # NOTE(review): unlike the two call sites above, this guard omits the
+ # len(upgrade_stack) < 2 check before indexing upgrade_stack[1] -- confirm.
+ upgrade_stack = stack_select._get_upgrade_stack()
+ if upgrade_stack is None or upgrade_stack[1] is None:
+ raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+ stack_version = upgrade_stack[1]
+
+ # install new sharelib to HDFS
+ sharelib_command = "{0}/{1}/oozie/bin/oozie-setup.sh sharelib create -fs {2}".format(
+ params.stack_root, stack_version, params.fs_root)
+
+ Execute(sharelib_command, user=params.oozie_user, logoutput=True)
+
+if __name__ == "__main__":
+ OozieUpgrade().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_service.py
new file mode 100644
index 0000000..7b15a63
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/oozie_service.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+
+# Local Imports
+from oozie import copy_atlas_hive_hook_to_dfs_share_lib
+
+# Resource Management Imports
+from resource_management.core import shell, sudo
+from resource_management import *
+from resource_management.core.shell import as_user
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.core import Logger
+
+# Windows variant: creates the Oozie DB schema (best effort) and drives the
+# registered Windows service; upgrade_type is accepted for signature parity but unused.
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def oozie_service(action='start', upgrade_type=None):
+ import params
+
+ if action == 'start':
+ cmd = format("cmd /C \"cd /d {oozie_tmp_dir} && {oozie_home}\\bin\\ooziedb.cmd create -sqlfile oozie.sql -run\"")
+ Execute(cmd, user=params.oozie_user, ignore_failures=True)
+ Service(params.oozie_server_win_service_name, action="start")
+ elif action == 'stop':
+ Service(params.oozie_server_win_service_name, action="stop")
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def oozie_service(action = 'start', upgrade_type=None):
+ """
+ Starts or stops the Oozie service
+ :param action: 'start' or 'stop'
+ :param upgrade_type: type of upgrade, either "rolling" or "non_rolling"; when set,
+ some start-time steps are skipped since a variation of them was performed during the rolling upgrade
+ :return:
+ """
+ import params
+
+ environment={'OOZIE_CONFIG': params.conf_dir}
+
+ # Build the kinit command once if the cluster is kerberized.
+ if params.security_enabled:
+ if params.oozie_principal is None:
+ oozie_principal_with_host = 'missing_principal'
+ else:
+ oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+ kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
+ else:
+ kinit_if_needed = ""
+
+ # Succeeds only when the PID file exists and the process is alive; used to skip work.
+ no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
+
+ if action == 'start':
+ start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
+ path_to_jdbc = params.target
+
+ # For known JDBC drivers, locate the driver JAR and prepare a connectivity check.
+ if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+ params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
+ params.jdbc_driver_name == "org.postgresql.Driver" or \
+ params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+
+ if not params.jdbc_driver_jar:
+ path_to_jdbc = format("{oozie_libext_dir}/") + \
+ params.default_connectors_map[params.jdbc_driver_name] if params.jdbc_driver_name in params.default_connectors_map else None
+ if not os.path.isfile(path_to_jdbc):
+ path_to_jdbc = format("{oozie_libext_dir}/") + "*"
+ error_message = "Error! Sorry, but we can't find jdbc driver with default name " + params.default_connectors_map[params.jdbc_driver_name] + \
+ " in oozie lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'"
+ Logger.error(error_message)
+
+ db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
+ else:
+ db_connection_check_command = None
+
+ # On a plain (non-upgrade) start, fail fast with instructions if the postgres driver is missing.
+ if upgrade_type is None:
+ if not os.path.isfile(path_to_jdbc) and params.jdbc_driver_name == "org.postgresql.Driver":
+ print format("ERROR: jdbc file {target} is unavailable. Please, follow next steps:\n" \
+ "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
+ "3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
+ "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
+ "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
+ exit(1)
+
+ if db_connection_check_command:
+ sudo.chmod(params.check_db_connection_jar, 0755)
+ Execute( db_connection_check_command,
+ tries=5,
+ try_sleep=10,
+ user=params.oozie_user,
+ )
+
+ # Create the DB schema; best-effort (ignore_failures) and skipped when the server already runs.
+ Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"),
+ user = params.oozie_user, not_if = no_op_test,
+ ignore_failures = True
+ )
+
+ if params.security_enabled:
+ Execute(kinit_if_needed,
+ user = params.oozie_user,
+ )
+
+ if params.sysprep_skip_copy_oozie_share_lib_to_hdfs:
+ Logger.info("Skipping creation of oozie sharelib as host is sys prepped")
+ # Copy current hive-site to hdfs:/user/oozie/share/lib/spark/
+ params.HdfsResource(format("{hdfs_share_dir}/lib/spark/hive-site.xml"),
+ action="create_on_execute",
+ type = 'file',
+ mode=0444,
+ owner=params.oozie_user,
+ group=params.user_group,
+ source=format("{hive_conf_dir}/hive-site.xml"),
+ )
+ params.HdfsResource(None, action="execute")
+
+ hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
+ elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+ # check with webhdfs is much faster than executing hadoop fs -ls.
+ util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
+ list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+ hdfs_share_dir_exists = ('FileStatus' in list_status)
+ else:
+ # have to do time expensive hadoop fs -ls check.
+ hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
+ user=params.oozie_user)[0]
+
+ # Upload the sharelib only when the share dir is absent.
+ if not hdfs_share_dir_exists:
+ Execute( params.put_shared_lib_to_hdfs_cmd,
+ user = params.oozie_user,
+ path = params.execute_path
+ )
+ params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+ type="directory",
+ action="create_on_execute",
+ mode=0755,
+ recursive_chmod=True,
+ )
+ params.HdfsResource(None, action="execute")
+
+
+ try:
+ # start oozie
+ Execute( start_cmd, environment=environment, user = params.oozie_user,
+ not_if = no_op_test )
+
+ copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type, params.upgrade_direction)
+ except:
+ # surface the server logs in the Ambari command output before re-raising
+ show_logs(params.oozie_log_dir, params.oozie_user)
+ raise
+
+ elif action == 'stop':
+ Directory(params.oozie_tmp_dir,
+ owner=params.oozie_user,
+ create_parents = True,
+ )
+
+ stop_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozied.sh stop 60 -force")
+
+ try:
+ # stop oozie
+ Execute(stop_cmd, environment=environment, only_if = no_op_test,
+ user = params.oozie_user)
+ except:
+ show_logs(params.oozie_log_dir, params.oozie_user)
+ raise
+
+ File(params.pid_file, action = "delete")
http://git-wip-us.apache.org/repos/asf/ambari/blob/df448220/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params.py
new file mode 100644
index 0000000..f39d632
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
+
+# Pull in the OS-specific parameter set first; the values below are common to both.
+if OSCheck.is_windows_family():
+ from params_windows import *
+else:
+ from params_linux import *
+
+# JDK location and major version from the Ambari command JSON
+# (config is provided by the params_windows/params_linux wildcard import).
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+# By default, copy the tarballs to HDFS. If the cluster is sysprepped, then set based on the config.
+sysprep_skip_copy_oozie_share_lib_to_hdfs = False
+if host_sys_prepped:
+ sysprep_skip_copy_oozie_share_lib_to_hdfs = default("/configurations/cluster-env/sysprep_skip_copy_oozie_share_lib_to_hdfs", False)
[34/50] [abbrv] ambari git commit: AMBARI-21770 - Spark1 Shuffle
Property Is Removed Incorrectly on a Stack Upgrade (jonathanhurley)
Posted by ja...@apache.org.
AMBARI-21770 - Spark1 Shuffle Property Is Removed Incorrectly on a Stack Upgrade (jonathanhurley)
(cherry picked from commit 24debeed14654249055077ad5483dc45d127a074)
Change-Id: Ic475ed2895a14740b576aeaf801f320320017bfd
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a0b3f4fc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a0b3f4fc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a0b3f4fc
Branch: refs/heads/2.5-maint
Commit: a0b3f4fc738aa28e2ac39a175da4e53e31208b3f
Parents: 2054332
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Aug 21 16:09:57 2017 -0400
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Mon Aug 21 19:19:29 2017 -0700
----------------------------------------------------------------------
.../stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml | 7 ++++++-
.../stacks/BigInsights/4.2/upgrades/config-upgrade.xml | 6 +++++-
2 files changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a0b3f4fc/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index f55f9fb..f90a20e 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -76,7 +76,12 @@
<changes>
<definition xsi:type="configure" id="biginsights_4_2_yarn_config_update" summary="Update Yarn configurations">
<type>yarn-site</type>
- <replace key="yarn.nodemanager.aux-services" find=",spark_shuffle" replace-with=""/>
+ <!-- adjust the spark shuffle values for spark1 and spark2 -->
+ <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle,spark2_shuffle"/>
+ <set key="yarn.nodemanager.aux-services.spark_shuffle.class" value="org.apache.spark.network.yarn.YarnShuffleService"/>
+ <set key="yarn.nodemanager.aux-services.spark_shuffle.classpath" value="{{stack_root}}/${hdp.version}/spark/aux/*"/>
+ <set key="yarn.nodemanager.aux-services.spark2_shuffle.class" value="org.apache.spark.network.yarn.YarnShuffleService"/>
+ <set key="yarn.nodemanager.aux-services.spark2_shuffle.classpath" value="{{stack_root}}/${hdp.version}/spark2/aux/*"/>
</definition>
<definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
<type>yarn-env</type>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a0b3f4fc/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index 02d3a01..d85c7ee 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -164,7 +164,11 @@
<changes>
<definition xsi:type="configure" id="biginsights_4_2_yarn_config_update" summary="Update Yarn configurations">
<type>yarn-site</type>
- <replace key="yarn.nodemanager.aux-services" find=",spark_shuffle" replace-with=""/>
+ <!-- adjust the spark shuffle values for spark1 based on whether spark is installed -->
+ <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle" if-type="spark-env" if-key="content" if-key-state="present"/>
+ <set key="yarn.nodemanager.aux-services.spark_shuffle.class" value="org.apache.spark.network.yarn.YarnShuffleService" if-type="spark-env" if-key="content" if-key-state="present"/>
+ <set key="yarn.nodemanager.aux-services.spark_shuffle.classpath" value="{{stack_root}}/${hdp.version}/spark/aux/*" if-type="spark-env" if-key="content" if-key-state="present"/>
+
<insert key="yarn.application.classpath" insert-type="append" newline-after="false" newline-before="false" value=",/usr/hdp/current/ext/hadoop/*"/>
</definition>
<definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
[20/50] [abbrv] ambari git commit: AMBARI-21488 Default Base URL
should be there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS_TYPE
to redhat7-ppc64, second patch. (atkach)
Posted by ja...@apache.org.
AMBARI-21488 Default Base URL should be there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS_TYPE to redhat7-ppc64, second patch. (atkach)
(cherry picked from commit 06661308259ae08ff21f33d4dd1e2e81ab9e74e3)
Change-Id: Ib6981e677ba276bf777e95d4b45b43cb1a1acded
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5e1bfb7a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5e1bfb7a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5e1bfb7a
Branch: refs/heads/2.5-maint
Commit: 5e1bfb7a6f4fcc216d4931960a4cb9f013001415
Parents: 6d2bab1
Author: Andrii Tkach <at...@apache.org>
Authored: Thu Aug 17 16:14:31 2017 +0300
Committer: atkach@apache.org <je...@hortonworks.com>
Committed: Thu Aug 17 14:51:10 2017 +0000
----------------------------------------------------------------------
ambari-web/app/controllers/installer.js | 3 ---
1 file changed, 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/5e1bfb7a/ambari-web/app/controllers/installer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/installer.js b/ambari-web/app/controllers/installer.js
index 77d5a5c..ccf03de 100644
--- a/ambari-web/app/controllers/installer.js
+++ b/ambari-web/app/controllers/installer.js
@@ -702,9 +702,6 @@ App.InstallerController = App.WizardController.extend(App.UserPref, {
response.operating_systems.forEach(function(supportedOS) {
if(!existedMap[supportedOS.OperatingSystems.os_type]) {
supportedOS.isSelected = false;
- supportedOS.repositories.forEach(function(repo) {
- repo.Repositories.base_url = '';
- });
existedOS.push(supportedOS);
} else if (stack_default) { // only overwrite if it is stack default, otherwise use url from /version_definition
existedMap[supportedOS.OperatingSystems.os_type].repositories.forEach(function (repo) {
[23/50] [abbrv] ambari git commit: AMBARI-21751. Fix regression
caused by AMBARI-21045. (yusaku)
Posted by ja...@apache.org.
AMBARI-21751. Fix regression caused by AMBARI-21045. (yusaku)
(cherry picked from commit da9ea1cfb3efb7fd538acbfd8f38f0a8a2c580df)
Change-Id: I72fc722312ff6ae3bbc305051ccf9a7a0ae96efe
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/99a0714f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/99a0714f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/99a0714f
Branch: refs/heads/2.5-maint
Commit: 99a0714f0020e6586c83da17242c7c2e1d3ac736
Parents: c0648e1
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Fri Aug 18 02:54:36 2017 -0700
Committer: yusaku@hortonworks.com <je...@hortonworks.com>
Committed: Fri Aug 18 10:36:18 2017 +0000
----------------------------------------------------------------------
.../stacks/HDP/2.6/services/stack_advisor.py | 31 --------------------
1 file changed, 31 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/99a0714f/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 974b10c..577e4f5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -42,7 +42,6 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
"HBASE": self.recommendHBASEConfigurations,
"YARN": self.recommendYARNConfigurations,
"KAFKA": self.recommendKAFKAConfigurations,
- "BEACON": self.recommendBEACONConfigurations,
"STORM": self.recommendSTORMConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
@@ -96,36 +95,6 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
putStormSiteProperty("nimbus.credential.renewers.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
putStormSiteProperty("nimbus.credential.renewers.freq.secs", "82800")
pass
-
- def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
- beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
- putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
-
- # database URL and driver class recommendations
- if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
- putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
- if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
- beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
- beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
- protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
- oldSchemaName = getOldValue(self, services, "beacon-env", "beacon_store_db_name")
- oldDBType = getOldValue(self, services, "beacon-env", "beacon_database")
- # under these if constructions we are checking if beacon server hostname available,
- # if it's default db connection url with "localhost" or if schema name was changed or if db type was changed (only for db type change from default mysql to existing mysql)
- # or if protocol according to current db type differs with protocol in db connection url(other db types changes)
- if beaconServerHost is not None:
- if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
- dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
- putbeaconEnvProperty('beacon_store_url', dbConnection)
-
- def getDBConnectionStringBeacon(self, databaseType):
- driverDict = {
- 'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
- 'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
- 'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
- 'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
- }
- return driverDict.get(databaseType.upper())
def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
[14/50] [abbrv] ambari git commit: AMBARI-21724 Hosts filters work
incorrectly. (atkach)
Posted by ja...@apache.org.
AMBARI-21724 Hosts filters work incorrectly. (atkach)
(cherry picked from commit 52daee8fa02b071383083884305bcc10564bb4d4)
Change-Id: I75d812edec08b25c4f61a0672becb8451cd38f4e
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fd3379f2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fd3379f2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fd3379f2
Branch: refs/heads/2.5-maint
Commit: fd3379f2fd92164c53b539cfc5a7770c1898e16a
Parents: 4f101bd
Author: Andrii Tkach <at...@apache.org>
Authored: Wed Aug 16 14:49:20 2017 +0300
Committer: atkach@apache.org <je...@hortonworks.com>
Committed: Wed Aug 16 13:06:36 2017 +0000
----------------------------------------------------------------------
ambari-web/app/mappers/hosts_mapper.js | 2 ++
ambari-web/app/models/host.js | 2 ++
ambari-web/app/views/main/host.js | 10 ++++++++++
3 files changed, 14 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd3379f2/ambari-web/app/mappers/hosts_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/hosts_mapper.js b/ambari-web/app/mappers/hosts_mapper.js
index 5042e67..a2d64bf 100644
--- a/ambari-web/app/mappers/hosts_mapper.js
+++ b/ambari-web/app/mappers/hosts_mapper.js
@@ -206,6 +206,7 @@ App.hostsMapper = App.QuickDataMapper.create({
parsedItem.not_started_components = notStartedComponents;
parsedItem.components_in_passive_state = componentsInPassiveState;
parsedItem.components_with_stale_configs = componentsWithStaleConfigs;
+ parsedItem.is_filtered = true;
hostIds[item.Hosts.host_name] = parsedItem;
@@ -224,6 +225,7 @@ App.hostsMapper = App.QuickDataMapper.create({
//"itemTotal" present only for Hosts page request
if (!Em.isNone(json.itemTotal)) {
+ App.Host.find().setEach('isFiltered', false);
App.Host.find().clear();
//App.HostComponent.find contains master components which requested across the app hence it should not be cleared
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd3379f2/ambari-web/app/models/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/host.js b/ambari-web/app/models/host.js
index 58ffd41..69e43ba 100644
--- a/ambari-web/app/models/host.js
+++ b/ambari-web/app/models/host.js
@@ -55,6 +55,8 @@ App.Host = DS.Model.extend({
index: DS.attr('number'),
stackVersions: DS.hasMany('App.HostStackVersion'),
+ isFiltered: DS.attr('boolean'),
+
/**
* Is host checked at the main Hosts page
*/
http://git-wip-us.apache.org/repos/asf/ambari/blob/fd3379f2/ambari-web/app/views/main/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host.js b/ambari-web/app/views/main/host.js
index 52692cc..273fd10 100644
--- a/ambari-web/app/views/main/host.js
+++ b/ambari-web/app/views/main/host.js
@@ -50,6 +50,16 @@ App.MainHostView = App.TableView.extend(App.TableServerViewMixin, {
*/
contentBinding: 'controller.content',
+ filteredContent: [],
+
+ filteredContentObserver: function() {
+ Em.run.once(this, this.setFilteredContentOnce);
+ }.observes('content.@each'),
+
+ setFilteredContentOnce: function() {
+ this.set('filteredContent', this.get('content').filterProperty('isFiltered'));
+ },
+
onRequestErrorHandler: function() {
this.set('requestError', null);
this.set('filteringComplete', true);
[46/50] [abbrv] ambari git commit: AMBARI-21823 - Spark2 Shuffle
Configuration Property Not Configure After Upgrade From BigInsights
(jonathanhurley)
Posted by ja...@apache.org.
AMBARI-21823 - Spark2 Shuffle Configuration Property Not Configure After Upgrade From BigInsights (jonathanhurley)
Change-Id: Ifcada9f5e1a001977db15d91eb9ffa90450c07f8
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/82a821e3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/82a821e3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/82a821e3
Branch: refs/heads/2.5-maint
Commit: 82a821e3d87b5a0956f7fb76699e14c445024dcb
Parents: f2f9afbc
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sat Aug 26 23:02:10 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sat Aug 26 23:02:10 2017 -0400
----------------------------------------------------------------------
.../BigInsights/4.2/upgrades/config-upgrade.xml | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/82a821e3/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index d85c7ee..cdc9c40 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -164,11 +164,12 @@
<changes>
<definition xsi:type="configure" id="biginsights_4_2_yarn_config_update" summary="Update Yarn configurations">
<type>yarn-site</type>
- <!-- adjust the spark shuffle values for spark1 based on whether spark is installed -->
- <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle" if-type="spark-env" if-key="content" if-key-state="present"/>
+ <!-- adjust the spark shuffle values for spark1 and spark2 -->
+ <set key="yarn.nodemanager.aux-services" value="mapreduce_shuffle,spark_shuffle,spark2_shuffle" if-type="spark-env" if-key="content" if-key-state="present"/>
<set key="yarn.nodemanager.aux-services.spark_shuffle.class" value="org.apache.spark.network.yarn.YarnShuffleService" if-type="spark-env" if-key="content" if-key-state="present"/>
<set key="yarn.nodemanager.aux-services.spark_shuffle.classpath" value="{{stack_root}}/${hdp.version}/spark/aux/*" if-type="spark-env" if-key="content" if-key-state="present"/>
-
+ <set key="yarn.nodemanager.aux-services.spark2_shuffle.class" value="org.apache.spark.network.yarn.YarnShuffleService" if-type="spark-env" if-key="content" if-key-state="present"/>
+ <set key="yarn.nodemanager.aux-services.spark2_shuffle.classpath" value="{{stack_root}}/${hdp.version}/spark2/aux/*" if-type="spark-env" if-key="content" if-key-state="present"/>
<insert key="yarn.application.classpath" insert-type="append" newline-after="false" newline-before="false" value=",/usr/hdp/current/ext/hadoop/*"/>
</definition>
<definition xsi:type="configure" id="yarn_env_security_opts" summary="Adding YARN Security ACLs">
@@ -201,7 +202,7 @@
</changes>
</component>
</service>
-
+
<service name="PIG">
<component name="PIG">
<changes>
@@ -287,7 +288,7 @@
<transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
<transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
</definition>
-
+
<definition xsi:type="configure" id="biginsights_4_2_hive_env_configure" summary="Configuring hive-env for MariaDB/RedHat7 support">
<type>hive-env</type>
<set key="mariadb_redhat_support" value="true"/>
@@ -322,14 +323,14 @@
</definition>
</changes>
</component>
-
+
<component name="WEBHCAT_SERVER">
<changes>
<definition xsi:type="configure" id="biginsights_4_2_webhcat_server_update_environment_configurations" summary="Update Hadoop home">
<type>webhcat-env</type>
<replace key="content" find="export HADOOP_HOME={{hadoop_home}}" replace-with="export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}" />
</definition>
-
+
<definition xsi:type="configure" id="biginsights_4_2_webhcat_server_update_configurations" summary="Updating Configuration Paths">
<type>webhcat-site</type>
<replace key="templeton.jar" find="/usr/iop/current/hive-webhcat" replace-with="/usr/hdp/${hdp.version}/hive"/>
@@ -340,7 +341,7 @@
</changes>
</component>
</service>
-
+
<service name="OOZIE">
<component name="OOZIE_SERVER">
<changes>
@@ -350,7 +351,7 @@
</definition>
<definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_configurations" summary="Update oozie env">
<type>oozie-env</type>
- <replace key="content" find="export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}" replace-with="export CATALINA_BASE={{oozie_server_dir}}" />
+ <replace key="content" find="export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}" replace-with="export CATALINA_BASE={{oozie_server_dir}}" />
<replace key="content" find="export CATALINA_OPTS=&quot;$CATALINA_OPTS -Xms{{oozie_initial_heapsize}}" replace-with="export CATALINA_OPTS=&quot;$CATALINA_OPTS"/>
</definition>
<definition xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" summary="Update oozie env">
[32/50] [abbrv] ambari git commit: AMBARI-21765. Missing hdfs_scheme
variable in params.py file for Apache Knox (Sandeep More via smohanty)
Posted by ja...@apache.org.
AMBARI-21765. Missing hdfs_scheme variable in params.py file for Apache Knox (Sandeep More via smohanty)
(cherry picked from commit ca6ce335d7bd3d50e5892a3169d90acbdada963d)
Change-Id: I60d15f356f462123df9ace2b4244e16d0d196e89
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/688258b0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/688258b0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/688258b0
Branch: refs/heads/2.5-maint
Commit: 688258b09ebf13ae5982ebda5ca364110b817fc0
Parents: 9277090
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Mon Aug 21 14:00:40 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Mon Aug 21 17:09:29 2017 -0700
----------------------------------------------------------------------
.../0.5.0.2.2/package/scripts/params_linux.py | 27 ++++++++++++++++++++
1 file changed, 27 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/688258b0/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
index 9b0bbfc..f54abef 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
@@ -157,17 +157,30 @@ else:
has_namenode = not namenode_host == None
namenode_http_port = "50070"
+namenode_https_port = "50470"
namenode_rpc_port = "8020"
if has_namenode:
if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
namenode_http_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+ if 'dfs.namenode.https-address' in config['configurations']['hdfs-site']:
+ namenode_https_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.https-address'])
if dfs_ha_enabled and namenode_rpc:
namenode_rpc_port = get_port_from_url(namenode_rpc)
else:
if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
namenode_rpc_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.rpc-address'])
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+
+hdfs_https_on = False
+hdfs_scheme = 'http'
+if dfs_http_policy != None :
+ hdfs_https_on = (dfs_http_policy.upper() == 'HTTPS_ONLY')
+ hdfs_scheme = 'http' if not hdfs_https_on else 'https'
+ hdfs_port = str(namenode_http_port) if not hdfs_https_on else str(namenode_https_port)
+ namenode_http_port = hdfs_port
+
webhdfs_service_urls = ""
def buildUrlElement(protocol, hdfs_host, port, servicePath) :
@@ -188,6 +201,13 @@ else:
webhdfs_service_urls = buildUrlElement("http", namenode_host, namenode_http_port, "/webhdfs")
+yarn_http_policy = default('/configurations/yarn-site/yarn.http.policy', None )
+yarn_https_on = False
+yarn_scheme = 'http'
+if yarn_http_policy != None :
+ yarn_https_on = ( yarn_http_policy.upper() == 'HTTPS_ONLY')
+ yarn_scheme = 'http' if not yarn_https_on else 'https'
+
rm_hosts = default("/clusterHostInfo/rm_host", None)
if type(rm_hosts) is list:
rm_host = rm_hosts[0]
@@ -227,17 +247,24 @@ if type(hbase_master_hosts) is list:
else:
hbase_master_host = hbase_master_hosts
+oozie_https_port = None
oozie_server_hosts = default("/clusterHostInfo/oozie_server", None)
if type(oozie_server_hosts) is list:
oozie_server_host = oozie_server_hosts[0]
else:
oozie_server_host = oozie_server_hosts
+oozie_scheme = 'http'
has_oozie = not oozie_server_host == None
oozie_server_port = "11000"
if has_oozie:
oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+ oozie_https_port = default("/configurations/oozie-site/oozie.https.port", None)
+
+if oozie_https_port is not None:
+ oozie_scheme = 'https'
+ oozie_server_port = oozie_https_port
# Knox managed properties
knox_managed_pid_symlink= format('{stack_root}/current/knox-server/pids')
[09/50] [abbrv] ambari git commit: AMBARI-21711. Seeing SQL errors in
ambari server log when installing HDF 3.1 (smohanty)
Posted by ja...@apache.org.
AMBARI-21711. Seeing SQL errors in ambari server log when installing HDF 3.1 (smohanty)
(cherry picked from commit 5f750176f0c1fb88b7577aa69d3054ded6382f93)
Change-Id: I5067ff76aefde6fe3790247ebf898da869fd07bb
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f99725e9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f99725e9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f99725e9
Branch: refs/heads/2.5-maint
Commit: f99725e9335b38401e9024465f0ff7904984031f
Parents: 6ddb896
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Aug 11 16:35:00 2017 -0700
Committer: smohanty@hortonworks.com <je...@hortonworks.com>
Committed: Fri Aug 11 23:56:10 2017 +0000
----------------------------------------------------------------------
ambari-server/src/main/resources/host_scripts/alert_disk_space.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f99725e9/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index f3c6406..eb92781 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -67,7 +67,7 @@ def execute(configurations={}, parameters={}, host_name=None):
return (('UNKNOWN', ['There were no configurations supplied to the script.']))
if not STACK_NAME in configurations or not STACK_ROOT in configurations:
- return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
+ return (('UNKNOWN', ['cluster-env/stack_name and cluster-env/stack_root are required']))
path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
[10/50] [abbrv] ambari git commit: AMBARI-21498. DB consistency
checker throws errors for missing 'product-info' configs after Ambari upgrade
(dlysnichenko)
Posted by ja...@apache.org.
AMBARI-21498. DB consistency checker throws errors for missing 'product-info' configs after Ambari upgrade (dlysnichenko)
(cherry picked from commit fb5567608f173fa4cbc4f1beed6fcf9163c418ed)
Change-Id: I27dd6ae3ae2f2a057e078ea0c50179c5ed08dc2d
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f1476629
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f1476629
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f1476629
Branch: refs/heads/2.5-maint
Commit: f1476629372d1cd7f01b64fc7ea7f4b765d72c11
Parents: f99725e
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Mon Aug 14 20:12:31 2017 +0300
Committer: dlysnichenko@hortonworks.com <je...@hortonworks.com>
Committed: Mon Aug 14 17:41:10 2017 +0000
----------------------------------------------------------------------
.../java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f1476629/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index ca7ab3f..ce1c8f1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -122,6 +122,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
*/
@Override
protected void executeDMLUpdates() throws AmbariException, SQLException {
+ addNewConfigurationsFromXml();
resetStackToolsAndFeatures();
ensureConfigTypesHaveAtLeastOneVersionSelected();
updateMariaDBRedHatSupportHive();
[05/50] [abbrv] ambari git commit: AMBARI-21690 Updating
get_stack_version pattern match (mugdha)
Posted by ja...@apache.org.
AMBARI-21690 Updating get_stack_version pattern match (mugdha)
(cherry picked from commit ee87b6ab60edc43ef94a404ae2ab72da42f3549f)
Change-Id: I51379e33b421aff6c8bb4fa85f668870c0915d46
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ad886577
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ad886577
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ad886577
Branch: refs/heads/2.5-maint
Commit: ad88657754802bd5e078741581e22a1d5472a529
Parents: 7ced0c9
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Thu Aug 10 11:57:28 2017 +0530
Committer: mugdha@apache.org <je...@hortonworks.com>
Committed: Fri Aug 11 04:36:40 2017 +0000
----------------------------------------------------------------------
.../resource_management/libraries/functions/get_stack_version.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/ad886577/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
index 7274a59..463d61f 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
@@ -85,7 +85,7 @@ def get_stack_version(package_name):
stack_version = re.sub(package_name + ' - ', '', stack_output)
stack_version = stack_version.rstrip()
- match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
+ match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+(-[0-9]+)?', stack_version)
if match is None:
Logger.info('Failed to get extracted version with ' + stack_selector_path)
[08/50] [abbrv] ambari git commit: AMBARI-21692 dfs.include file is
created on all datanode hosts when Ambari manages include/exclude files
(dsen)
Posted by ja...@apache.org.
AMBARI-21692 dfs.include file is created on all datanode hosts when Ambari manages include/exclude files (dsen)
(cherry picked from commit aa9d866e7c4df1bdff665bec3154e3731cd8f5a7)
Change-Id: Id1c35871654e0c1df4c4caeb3bbe63df3f59a9bd
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6ddb896a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6ddb896a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6ddb896a
Branch: refs/heads/2.5-maint
Commit: 6ddb896ab969e451540f3f3271f1fddcbb567946
Parents: b79e01b
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Aug 11 13:38:46 2017 +0300
Committer: dsen@apache.org <je...@hortonworks.com>
Committed: Fri Aug 11 11:11:11 2017 +0000
----------------------------------------------------------------------
.../2.1.0.2.0/package/scripts/hdfs_snamenode.py | 6 ++++++
.../before-START/scripts/shared_initialization.py | 15 ---------------
.../services/HDFS/package/scripts/hdfs_snamenode.py | 6 ++++++
.../before-START/scripts/shared_initialization.py | 16 +---------------
.../before-START/scripts/shared_initialization.py | 14 --------------
5 files changed, 13 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ddb896a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py
index 500ed15..ef6a2e2 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py
@@ -37,6 +37,12 @@ def snamenode(action=None, format=False):
content=Template("exclude_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group)
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group)
+ pass
elif action == "start" or action == "stop":
import params
service(
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ddb896a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
index 265502f..cba724a 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/shared_initialization.py
@@ -137,21 +137,6 @@ def setup_configs():
group=params.user_group
)
- generate_include_file()
-
-
-def generate_include_file():
- import params
-
- if params.has_namenode and params.dfs_hosts and params.has_slaves:
- include_hosts_list = params.slave_hosts
- File(params.dfs_hosts,
- content=Template("include_hosts_list.j2"),
- owner=params.hdfs_user,
- group=params.user_group
- )
-
-
def install_snappy():
import params
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ddb896a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_snamenode.py
index e734973..b7e4044 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_snamenode.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -35,6 +35,12 @@ def snamenode(action=None, format=False):
content=Template("exclude_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group)
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group)
+ pass
elif action == "start" or action == "stop":
service(
action=action,
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ddb896a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index 148d235..c279c3f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -142,20 +142,6 @@ def setup_configs():
group=params.user_group
)
- generate_include_file()
-
-
-def generate_include_file():
- import params
-
- if params.has_namenode and params.dfs_hosts and params.has_slaves:
- include_hosts_list = params.slave_hosts
- File(params.dfs_hosts,
- content=Template("include_hosts_list.j2"),
- owner=params.hdfs_user,
- group=params.user_group
- )
-
def create_javahome_symlink():
if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
Directory("/usr/jdk64/",
@@ -259,4 +245,4 @@ def setup_unlimited_key_jce_policy():
only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"),
path=['/bin/', '/usr/bin'],
sudo=True
- )
\ No newline at end of file
+ )
http://git-wip-us.apache.org/repos/asf/ambari/blob/6ddb896a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/hooks/before-START/scripts/shared_initialization.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/hooks/before-START/scripts/shared_initialization.py
index ba9c8fb..01e55c0 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/hooks/before-START/scripts/shared_initialization.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/hooks/before-START/scripts/shared_initialization.py
@@ -132,20 +132,6 @@ def setup_configs():
group=params.user_group
)
- generate_include_file()
-
-
-def generate_include_file():
- import params
-
- if params.has_namenode and params.dfs_hosts and params.has_slaves:
- include_hosts_list = params.slave_hosts
- File(params.dfs_hosts,
- content=Template("include_hosts_list.j2"),
- owner=params.hdfs_user,
- group=params.user_group
- )
-
def create_javahome_symlink():
if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
Directory("/usr/jdk64/",
[49/50] [abbrv] ambari git commit: AMBARI-21807. 'Move Hive
Metastore' wizard is stuck while configuring MYSQL_SERVER. (jaimin)
Posted by ja...@apache.org.
AMBARI-21807. 'Move Hive Metastore' wizard is stuck while configuring MYSQL_SERVER. (jaimin)
(cherry picked from commit 15c6f5a3ce8b10da25c383edd99455793fbdd25b)
Change-Id: Ie47344fa6acb8e20a1f5bf2cc41cebfba68a804b
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2453e164
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2453e164
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2453e164
Branch: refs/heads/2.5-maint
Commit: 2453e16418fd964042452b649153dbe45f3c6009
Parents: 99269b1
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Tue Aug 29 20:20:20 2017 -0700
Committer: jaimin@hortonworks.com <je...@hortonworks.com>
Committed: Wed Aug 30 03:31:27 2017 +0000
----------------------------------------------------------------------
.../controllers/main/service/reassign/step1_controller_test.js | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/2453e164/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js b/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
index 7dbf24a..0a75903 100644
--- a/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
+++ b/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
@@ -109,6 +109,7 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
controller.set('_super', Em.K);
sinon.stub(controller, 'getDatabaseHost', Em.K);
+ sinon.stub(controller, 'isExistingDb', Em.K);
sinon.stub(controller, 'saveDatabaseType', Em.K);
sinon.stub(controller, 'saveServiceProperties', Em.K);
sinon.stub(controller, 'saveConfigs', Em.K);
@@ -119,6 +120,7 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
afterEach(function() {
controller.getDatabaseHost.restore();
+ controller.isExistingDb.restore();
controller.saveDatabaseType.restore();
controller.saveServiceProperties.restore();
controller.saveConfigs.restore();
[24/50] [abbrv] ambari git commit: AMBARI-21761. VDF should support
redhat-ppc6 and redhat-ppc7 OS types (ncole)
Posted by ja...@apache.org.
AMBARI-21761. VDF should support redhat-ppc6 and redhat-ppc7 OS types (ncole)
(cherry picked from commit 4fa581afb0497e3d3e607530501290fa9fff4340)
Change-Id: Ifd2dd06e2df8aac270235e8ce79f013cf7352afc
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dcf9a4a9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dcf9a4a9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dcf9a4a9
Branch: refs/heads/2.5-maint
Commit: dcf9a4a90048306e3a4223ad74bf149b41d45476
Parents: 99a0714
Author: Nate Cole <nc...@hortonworks.com>
Authored: Fri Aug 18 23:15:52 2017 -0400
Committer: ncole@hortonworks.com <je...@hortonworks.com>
Committed: Sat Aug 19 11:46:23 2017 +0000
----------------------------------------------------------------------
ambari-server/src/main/resources/version_definition.xsd | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcf9a4a9/ambari-server/src/main/resources/version_definition.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/version_definition.xsd b/ambari-server/src/main/resources/version_definition.xsd
index bef3739..b772d7e 100644
--- a/ambari-server/src/main/resources/version_definition.xsd
+++ b/ambari-server/src/main/resources/version_definition.xsd
@@ -51,6 +51,8 @@
<xs:restriction base="xs:string">
<xs:enumeration value="redhat6" />
<xs:enumeration value="redhat7" />
+ <xs:enumeration value="redhat-ppc6" />
+ <xs:enumeration value="redhat-ppc7" />
<xs:enumeration value="debian6" />
<xs:enumeration value="debian7" />
<xs:enumeration value="ubuntu12" />
[11/50] [abbrv] ambari git commit: AMBARI-21045. Enable
Storm's AutoTGT configs in secure mode (Sriharsha Chintalapani via smohanty)
Posted by ja...@apache.org.
AMBARI-21045. Enable Storm's AutoTGT configs in secure mode (Sriharsha Chintalapani via smohanty)
(cherry picked from commit 3d07ec5a091de3735e63c5500c380612e6533baa)
Change-Id: I634998c832262241f656ac5d02413c9ae4d854e1
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/55fbea58
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/55fbea58
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/55fbea58
Branch: refs/heads/2.5-maint
Commit: 55fbea58adcd495206902fb0f055cf1dc81bafff
Parents: f147662
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Aug 15 15:59:46 2017 -0700
Committer: smohanty@hortonworks.com <je...@hortonworks.com>
Committed: Tue Aug 15 23:21:10 2017 +0000
----------------------------------------------------------------------
.../server/upgrade/UpgradeCatalog251.java | 36 ++++++++
.../STORM/1.1.0/configuration/storm-env.xml | 54 ++++++++++++
.../STORM/1.1.0/configuration/storm-site.xml | 48 -----------
.../stacks/HDP/2.3/upgrades/config-upgrade.xml | 7 ++
.../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.3/upgrades/upgrade-2.6.xml | 1 +
.../stacks/HDP/2.4/upgrades/config-upgrade.xml | 7 ++
.../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.4/upgrades/upgrade-2.6.xml | 2 +
.../stacks/HDP/2.5/upgrades/config-upgrade.xml | 7 ++
.../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.5/upgrades/upgrade-2.6.xml | 1 +
.../stacks/HDP/2.6/services/stack_advisor.py | 87 +++++++++++++++++++-
.../stacks/HDP/2.6/upgrades/config-upgrade.xml | 6 ++
.../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.6/upgrades/upgrade-2.6.xml | 1 +
.../src/main/resources/stacks/stack_advisor.py | 18 ++++
17 files changed, 240 insertions(+), 51 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index afda1f6..1a854f8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -170,4 +170,40 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
}
}
}
+
+ /**
+ * Make sure storm-env changes are applied to anyone upgrading to HDP-2.6.1 Storm
+ * If the base version was before Ambari 2.5.0, this method should wind up doing nothing.
+ * @throws AmbariException
+ */
+ protected void updateSTORMConfigs() throws AmbariException {
+ AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+ Clusters clusters = ambariManagementController.getClusters();
+ if (clusters != null) {
+ Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+ if (clusterMap != null && !clusterMap.isEmpty()) {
+ for (final Cluster cluster : clusterMap.values()) {
+ Set<String> installedServices = cluster.getServices().keySet();
+
+ if (installedServices.contains("STORM") && cluster.getSecurityType() == SecurityType.KERBEROS) {
+ Config stormEnv = cluster.getDesiredConfigByType(STORM_ENV_CONFIG);
+ String content = stormEnv.getProperties().get("content");
+ if (content != null && !content.contains("STORM_AUTOCREDS_LIB_DIR")) {
+ Map<String, String> newProperties = new HashMap<>();
+ String stormEnvConfigs = "\n #set storm-auto creds \n" +
+ "# check if storm_jaas.conf in config , only enable storm_auto_creds in secure mode.\n " +
+ "STORM_JAAS_CONF=$STORM_HOME/conf/storm_jaas.conf \n" +
+ "STORM_AUTOCREDS_LIB_DIR=$STORM_HOME/external/storm-autocreds \n" +
+ "if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then \n" +
+ " export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR \n" +
+ "fi\n";
+ content += stormEnvConfigs;
+ newProperties.put("content", content);
+ updateConfigurationPropertiesForCluster(cluster, "storm-env", newProperties, true, false);
+ }
+ }
+ }
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
new file mode 100644
index 0000000..2ce560d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-env.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+ <!-- storm-env.sh -->
+ <property>
+ <name>content</name>
+ <display-name>storm-env template</display-name>
+ <description>This is the jinja template for storm-env.sh file</description>
+ <value>
+#!/bin/bash
+
+# Set Storm specific environment variables here.
+
+# The java implementation to use.
+export JAVA_HOME={{java64_home}}
+
+export STORM_CONF_DIR={{conf_dir}}
+export STORM_HOME={{storm_component_home_dir}}
+export STORM_JAR_JVM_OPTS={{jar_jvm_opts}}
+
+#set storm-auto creds
+# check if storm_jaas.conf in config , only enable storm_auto_creds in secure mode.
+STORM_JAAS_CONF=$STORM_HOME/conf/storm_jaas.conf
+STORM_AUTOCREDS_LIB_DIR=$STORM_HOME/external/storm-autocreds
+
+if [ -f $STORM_JAAS_CONF ] && [ -d $STORM_AUTOCREDS_LIB_DIR ]; then
+ export STORM_EXT_CLASSPATH=$STORM_AUTOCREDS_LIB_DIR
+fi
+ </value>
+ <value-attributes>
+ <type>content</type>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml b/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
deleted file mode 100644
index b2e9acb..0000000
--- a/ambari-server/src/main/resources/common-services/STORM/1.1.0/configuration/storm-site.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
- <property>
- <name>nimbus.impersonation.acl</name>
- <description>
- The ImpersonationAuthorizer uses nimbus.impersonation.acl as the acl to authorize users. Following is a sample nimbus config for supporting impersonation:
- nimbus.impersonation.acl:
- impersonating_user1:
- hosts:
- [comma separated list of hosts from which impersonating_user1 is allowed to impersonate other users]
- groups:
- [comma separated list of groups whose users impersonating_user1 is allowed to impersonate]
- impersonating_user2:
- hosts:
- [comma separated list of hosts from which impersonating_user2 is allowed to impersonate other users]
- groups:
- [comma separated list of groups whose users impersonating_user2 is allowed to impersonate]
- </description>
- <!-- The depends-on section is new in this version, required so Stack Advisor can include streamline-env and streamline-common configs. -->
- <depends-on>
- <property>
- <type>streamline-env</type>
- <name>streamline_principal_name</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index d8e2d7d..0c4106e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -664,6 +664,13 @@
replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
+
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index ca3c193..c45c7c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -638,6 +638,10 @@
<task xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm">
<!-- Remove Atlas configs that were incorrectly added to storm-site instead of Atlas' application.properties. -->
<task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index cf2c257..69be4dc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -1130,6 +1130,7 @@
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
<task xsi:type="configure" id="storm_worker_log4j_directory" />
+ <task xsi:type="configure" id="storm_nimbus_autocred_config" />
</pre-upgrade>
<pre-downgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index 83a0a1e..3e62fb1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -483,6 +483,13 @@
<replace key="content" find="${sys:storm.log.dir}/${sys:logfile.name}"
replace-with="${sys:workers.artifacts}/${sys:storm.id}/${sys:worker.port}/${sys:logfile.name}"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
+
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 6e899ee..b6f48fc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -563,6 +563,10 @@
<task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<execute-stage service="STORM" component="NIMBUS" title="Apply security changes for Storm">
<!-- Add nimbus.impersonation acls . -->
<task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" />
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 4e14e83..f1d34e7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -1092,6 +1092,8 @@
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
<task xsi:type="configure" id="storm_worker_log4j_directory" />
+ <task xsi:type="configure" id="storm_nimbus_autocred_config" />
+
</pre-upgrade>
<pre-downgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index f227bac..8539006 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -63,6 +63,13 @@
<regex-replace key="content" find="A1" immediateFlush="false"
 fileName="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}"
 filePattern="\$\{sys:storm.log.dir}/\$\{sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>\$\{pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="(?:[0-9]+) MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="([0-9]+)"
replace-with="A1" immediateFlush="false"
 fileName="${sys:storm.log.dir}/${sys:logfile.name}"
 filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
 <PatternLayout>
 <pattern>${pattern}</pattern>
 </PatternLayout>
 <Policies>
 <SizeBasedTriggeringPolicy size="{{storm_a1_maxfilesize}} MB"/> <!-- Or every 100 MB -->
 </Policies>
 <DefaultRolloverStrategy max="{{storm_a1_maxbackupindex}}"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
+
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index ce10e8b..ef4c6f4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -416,6 +416,10 @@
<task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<!--RANGER-->
<execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
<task xsi:type="configure" id="admin_log4j_parameterize">
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 840b17d..d9675f1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -1030,6 +1030,7 @@
<pre-upgrade>
<task xsi:type="configure" id="storm_worker_log4j_parameterize" />
<task xsi:type="configure" id="storm_cluster_log4j_parameterize" />
+ <task xsi:type="configure" id="storm_nimbus_autocred_config" />
</pre-upgrade>
<pre-downgrade/>
<upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index cc5fa92..974b10c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -41,11 +41,92 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
"HIVE": self.recommendHIVEConfigurations,
"HBASE": self.recommendHBASEConfigurations,
"YARN": self.recommendYARNConfigurations,
- "KAFKA": self.recommendKAFKAConfigurations
+ "KAFKA": self.recommendKAFKAConfigurations,
+ "BEACON": self.recommendBEACONConfigurations,
+ "STORM": self.recommendSTORMConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
+ def recommendSTORMConfigurations(self, configurations, clusterData, services, hosts):
+ """
+ In HDF-2.6.1 we introduced a new way of doing Auto Credentials with services such as
+ HDFS, HIVE, HBASE. This method will update the required configs for autocreds if the user installs the
+ STREAMLINE service.
+ """
+ super(HDP26StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
+ storm_site = self.getServicesSiteProperties(services, "storm-site")
+ storm_env = self.getServicesSiteProperties(services, "storm-env")
+ putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+ putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+ security_enabled = self.isSecurityEnabled(services)
+ servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+ if storm_env and storm_site and security_enabled and 'STREAMLINE' in servicesList:
+ storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"] if "nimbus.impersonation.acl" in storm_site else None
+ streamline_env = self.getServicesSiteProperties(services, "streamline-env")
+ _streamline_principal_name = streamline_env['streamline_principal_name'] if 'streamline_principal_name' in streamline_env else None
+ if _streamline_principal_name is not None and storm_nimbus_impersonation_acl is not None:
+ streamline_bare_principal = get_bare_principal(_streamline_principal_name)
+ storm_nimbus_impersonation_acl.replace('{{streamline_bare_principal}}', streamline_bare_principal)
+ putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
+
+ storm_nimbus_autocred_plugin_classes = storm_site["nimbus.autocredential.plugins.classes"] if "nimbus.autocredential.plugins.classes" in storm_site else None
+ if storm_nimbus_autocred_plugin_classes is not None:
+ new_storm_nimbus_autocred_plugin_classes = ['org.apache.storm.hdfs.security.AutoHDFS',
+ 'org.apache.storm.hbase.security.AutoHBase',
+ 'org.apache.storm.hive.security.AutoHive']
+ new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_autocred_plugin_classes,
+ new_storm_nimbus_autocred_plugin_classes)
+
+ putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
+ else:
+ putStormSiteProperty("nimbus.autocredential.plugins.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
+
+
+ storm_nimbus_credential_renewer_classes = storm_site["nimbus.credential.renewers.classes"] if "nimbus.credential.renewers.classes" in storm_site else None
+ if storm_nimbus_credential_renewer_classes is not None:
+ new_storm_nimbus_credential_renewer_classes_array = ['org.apache.storm.hdfs.security.AutoHDFS',
+ 'org.apache.storm.hbase.security.AutoHBase',
+ 'org.apache.storm.hive.security.AutoHive']
+ new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_credential_renewer_classes,
+ new_storm_nimbus_credential_renewer_classes_array)
+ putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
+ else:
+ putStormSiteProperty("nimbus.credential.renewers.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
+ putStormSiteProperty("nimbus.credential.renewers.freq.secs", "82800")
+ pass
+
+ def recommendBEACONConfigurations(self, configurations, clusterData, services, hosts):
+ beaconEnvProperties = self.getSiteProperties(services['configurations'], 'beacon-env')
+ putbeaconEnvProperty = self.putProperty(configurations, "beacon-env", services)
+
+ # database URL and driver class recommendations
+ if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_driver') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+ putbeaconEnvProperty('beacon_store_driver', self.getDBDriver(beaconEnvProperties['beacon_database']))
+ if beaconEnvProperties and self.checkSiteProperties(beaconEnvProperties, 'beacon_store_db_name', 'beacon_store_url') and self.checkSiteProperties(beaconEnvProperties, 'beacon_database'):
+ beaconServerHost = self.getHostWithComponent('BEACON', 'BEACON_SERVER', services, hosts)
+ beaconDBConnectionURL = beaconEnvProperties['beacon_store_url']
+ protocol = self.getProtocol(beaconEnvProperties['beacon_database'])
+ oldSchemaName = getOldValue(self, services, "beacon-env", "beacon_store_db_name")
+ oldDBType = getOldValue(self, services, "beacon-env", "beacon_database")
+ # under these if constructions we are checking whether the beacon server hostname is available,
+ # whether it is the default db connection url containing "localhost", whether the schema name was changed, whether the db type was changed (only for a db type change from default mysql to existing mysql),
+ # or whether the protocol for the current db type differs from the protocol in the db connection url (other db type changes)
+ if beaconServerHost is not None:
+ if (beaconDBConnectionURL and "//localhost" in beaconDBConnectionURL) or oldSchemaName or oldDBType or (protocol and beaconDBConnectionURL and not beaconDBConnectionURL.startswith(protocol)):
+ dbConnection = self.getDBConnectionStringBeacon(beaconEnvProperties['beacon_database']).format(beaconServerHost['Hosts']['host_name'], beaconEnvProperties['beacon_store_db_name'])
+ putbeaconEnvProperty('beacon_store_url', dbConnection)
+
+ def getDBConnectionStringBeacon(self, databaseType):
+ driverDict = {
+ 'NEW DERBY DATABASE': 'jdbc:derby:${{beacon.data.dir}}/${{beacon.store.db.name}}-db;create=true',
+ 'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
+ 'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
+ 'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}'
+ }
+ return driverDict.get(databaseType.upper())
+
def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
super(HDP26StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
@@ -401,9 +482,9 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
propertyValue = "https://"+webapp_address+"/ws/v1/applicationhistory"
Logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
if services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"] != propertyValue:
- validationItems.append(
+ validationItems = [
{"config-name": "yarn.log.server.web-service.url",
- "item": self.getWarnItem("Value should be %s" % propertyValue)})
+ "item": self.getWarnItem("Value should be %s" % propertyValue)}]
return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def validateDruidHistoricalConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index c2c1532..3881137 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -111,6 +111,12 @@
<set key="ranger.plugin.storm.ambari.cluster.name" value="{{cluster_name}}"
if-type="ranger-storm-plugin-properties" if-key="ranger-storm-plugin-enabled" if-key-state="present"/>
</definition>
+ <definition xsi:type="configure" id="storm_nimbus_autocred_config" summary="Update Storm's Nimbus AutoCred config">
+ <type>storm-site</type>
+ <set key="nimbus.autocredential.plugins.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.classes" value="['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ <set key="nimbus.credential.renewers.freq.secs" value="82800" if-type="streamline-common" if-key="authorizer.class.name" if-key-state="present"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index df609cd..6d8821b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -328,6 +328,10 @@
<task xsi:type="configure" id="hdp_2_6_maint_ranger_storm_plugin_cluster_name"/>
</execute-stage>
+ <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm AutoCreds">
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
+ </execute-stage>
+
<!-- YARN -->
<execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Ranger Yarn plugin">
<task xsi:type="configure" id="hdp_2_6_maint_ranger_yarn_plugin_cluster_name"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index b376fa7..07f0960 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -965,6 +965,7 @@
<component name="NIMBUS">
<pre-upgrade>
<task xsi:type="configure" id="hdp_2_6_maint_ranger_storm_plugin_cluster_name"/>
+ <task xsi:type="configure" id="storm_nimbus_autocred_config"/>
</pre-upgrade>
<pre-downgrade/> <!-- no-op to prevent config changes on downgrade -->
<upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/55fbea58/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index ccbb676..8ccbaba 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -1375,3 +1375,21 @@ class DefaultStackAdvisor(StackAdvisor):
if recommendation:
put_f(name, ",".join(recommendation))
+
+ @classmethod
+ def appendToYamlString(cls, yaml_string, list_classes):
+ updated_yaml_string = ""
+ try:
+ strip_yaml_str = re.sub('[\[\]\']', ' ', yaml_string)
+ klass_array = [x.strip() for x in strip_yaml_str.split(',')]
+ if yaml_string:
+ for klass in list_classes:
+ klass = klass.strip()
+ klass_array.append(klass)
+ klass_set = set(klass_array)
+ klass_list = [("'" + e + "'") for e in klass_set]
+ updated_yaml_string = "[" + ",".join(klass_list) + "]"
+ except Exception:
+ klass_list = [("'" + e + "'") for e in list_classes]
+ updated_yaml_string = "[" + ",".join(klass_list) + "]"
+ return updated_yaml_string
[31/50] [abbrv] ambari git commit: AMBARI-21768. Spark History Server
uses wrong log dir. (Attila Doroszlai via swagle)
Posted by ja...@apache.org.
AMBARI-21768. Spark History Server uses wrong log dir. (Attila Doroszlai via swagle)
(cherry picked from commit d1df4d9aa658975a82b5cb5e16be893d36268c87)
Change-Id: I94b918c2e186a2de03583ee8fd5a4e9bdb81fe1f
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/92770907
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/92770907
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/92770907
Branch: refs/heads/2.5-maint
Commit: 92770907f7dbc7dbd3303167d463c4f838048d5c
Parents: a4f7784
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Mon Aug 21 13:53:26 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Mon Aug 21 17:09:22 2017 -0700
----------------------------------------------------------------------
.../BigInsights/4.2/upgrades/config-upgrade.xml | 16 ++++++++++++++++
.../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml | 6 ++++++
2 files changed, 22 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/92770907/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index ad20bf9..02d3a01 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -372,6 +372,22 @@
</definition>
</changes>
</component>
+ <component name="SPARK_JOBHISTORYSERVER">
+ <changes>
+ <definition xsi:type="configure" id="biginsights_4_2_spark_history_defaults" summary="Update spark-defaults for History Server">
+ <type>spark-defaults</type>
+ <transfer operation="copy" to-key="spark.history.fs.logDirectory" from-type="spark-defaults" from-key="spark.eventLog.dir" if-type="spark-defaults" if-key="spark.eventLog.dir" if-key-state="present" />
+ <replace key="spark.eventLog.dir" find="hdfs:///spark-history" replace-with="hdfs:///iop/apps/4.2.0.0/spark/logs/history-server" />
+ <replace key="spark.history.fs.logDirectory" find="hdfs:///spark-history" replace-with="hdfs:///iop/apps/4.2.0.0/spark/logs/history-server" />
+ <insert key="spark.eventLog.dir" value="hdfs://" insert-type="prepend" />
+ <insert key="spark.history.fs.logDirectory" value="hdfs://" insert-type="prepend" />
+ </definition>
+ <definition xsi:type="configure" id="biginsights_4_2_spark_history_env" summary="Update spark-env for History Server">
+ <type>spark-env</type>
+ <replace key="content" find="-Dspark.history.fs.logDirectory={{spark_eventlog_dir_default}}" replace-with="" />
+ </definition>
+ </changes>
+ </component>
</service>
<service name="KNOX">
http://git-wip-us.apache.org/repos/asf/ambari/blob/92770907/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index b8c23bb..c831326 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -316,6 +316,12 @@
<execute-stage service="SPARK" component="SPARK_THRIFTSERVER" title="Apply config changes for Spark Thrift server">
<task xsi:type="configure" id="biginsights_4_2_spark_env" />
</execute-stage>
+ <execute-stage service="SPARK" component="SPARK_JOBHISTORYSERVER" title="Apply config changes for Spark History Server">
+ <task xsi:type="configure" id="biginsights_4_2_spark_history_defaults" />
+ </execute-stage>
+ <execute-stage service="SPARK" component="SPARK_JOBHISTORYSERVER" title="Apply config changes for Spark History Server">
+ <task xsi:type="configure" id="biginsights_4_2_spark_history_env" />
+ </execute-stage>
<!-- OOZIE -->
<execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie server">