Posted to commits@ambari.apache.org by ds...@apache.org on 2016/02/16 16:58:49 UTC

[1/4] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Repository: ambari
Updated Branches:
  refs/heads/trunk 0868a0fc1 -> 7e75e52a9


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 0ce8e5a..95e931a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -811,6 +811,9 @@
         "ams-hbase-log4j": {
             "content": "\n"
         },
+        "ams-ssl-server": {
+            "content": "\n"
+        },
         "ams-site": {
             "timeline.metrics.host.aggregator.minute.ttl": "604800",
             "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier": "1",
@@ -859,6 +862,7 @@
         "ams-hbase-site": {},
         "ams-hbase-policy": {},
         "ams-hbase-log4j": {},
+        "ams-ssl-server": {},
         "ams-site": {},
         "yarn-site": {
         "final": {
@@ -941,6 +945,9 @@
         "ams-site": {
             "tag": "version1"
         },
+        "ams-ssl-server": {
+            "tag": "version1"
+        },
         "ams-hbase-policy": {
             "tag": "version1"
         },


[4/4] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Posted by ds...@apache.org.
AMBARI-15050 Https Support for Metrics System (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e75e52a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e75e52a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e75e52a

Branch: refs/heads/trunk
Commit: 7e75e52a91568d7ed0dd2bdaafbd60b37a3e7dc8
Parents: 0868a0f
Author: Dmytro Sen <ds...@apache.org>
Authored: Tue Feb 16 17:53:11 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Tue Feb 16 17:53:11 2016 +0200

----------------------------------------------------------------------
 .../timeline/AbstractTimelineMetricsSink.java   |   92 +-
 .../src/main/conf/flume-metrics2.properties.j2  |    3 +-
 .../sink/flume/FlumeTimelineMetricsSink.java    |   11 +-
 .../conf/hadoop-metrics2-hbase.properties.j2    |    8 +-
 .../src/main/conf/hadoop-metrics2.properties.j2 |   22 +-
 .../timeline/HadoopTimelineMetricsSink.java     |   13 +-
 .../timeline/HadoopTimelineMetricsSinkTest.java |    6 +-
 .../conf/unix/metric_monitor.ini                |    1 +
 .../src/main/python/core/config_reader.py       |    6 +-
 .../src/main/python/core/emitter.py             |   18 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |   17 +-
 .../kafka/KafkaTimelineMetricsReporterTest.java |    2 +-
 .../storm/StormTimelineMetricsReporter.java     |   24 +-
 .../sink/storm/StormTimelineMetricsSink.java    |    8 +-
 .../ApplicationHistoryServer.java               |   11 +-
 .../loadsimulator/net/RestMetricsSender.java    |    6 +-
 .../timeline/TimelineMetricConfiguration.java   |    6 +-
 .../ComponentSSLConfiguration.java              |   14 +-
 .../server/configuration/Configuration.java     |    6 +-
 .../ganglia/GangliaPropertyProvider.java        |    2 +-
 .../ganglia/GangliaReportPropertyProvider.java  |    2 +-
 .../metrics/timeline/AMSPropertyProvider.java   |    8 +-
 .../timeline/AMSReportPropertyProvider.java     |    2 +-
 .../ACCUMULO/1.6.1.2.2.0/metainfo.xml           |    1 +
 .../1.6.1.2.2.0/package/scripts/params.py       |    7 +
 .../hadoop-metrics2-accumulo.properties.j2      |    7 +-
 .../0.1.0/configuration/ams-site.xml            |   12 +-
 .../0.1.0/configuration/ams-ssl-client.xml      |   37 +
 .../0.1.0/configuration/ams-ssl-server.xml      |   64 +
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |    2 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |    8 +
 .../package/scripts/metrics_grafana_util.py     |    2 +-
 .../0.1.0/package/scripts/params.py             |   10 +
 .../0.1.0/package/scripts/service_check.py      |   17 +-
 .../hadoop-metrics2-hbase.properties.j2         |    7 +-
 .../package/templates/metric_monitor.ini.j2     |    1 +
 .../FLUME/1.4.0.2.0/metainfo.xml                |    1 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |    7 +
 .../templates/flume-metrics2.properties.j2      |    8 +-
 .../common-services/HAWQ/2.0.0/metainfo.xml     |    1 +
 .../HBASE/0.96.0.2.0/metainfo.xml               |    1 +
 .../0.96.0.2.0/package/scripts/params_linux.py  |    8 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    7 +-
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    7 +-
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml |    1 +
 .../0.8.1.2.2/configuration/kafka-broker.xml    |   21 +
 .../KAFKA/0.8.1.2.2/metainfo.xml                |    1 +
 .../KAFKA/0.8.1.2.2/package/scripts/kafka.py    |    4 +
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |   10 +-
 .../STORM/0.9.1.2.1/metainfo.xml                |    1 +
 .../0.9.1.2.1/package/scripts/params_linux.py   |   11 +-
 .../0.9.1.2.1/package/templates/config.yaml.j2  |    8 +-
 .../templates/storm-metrics2.properties.j2      |   10 +-
 .../common-services/YARN/2.1.0.2.0/metainfo.xml |    2 +
 .../2.0.6/hooks/before-START/scripts/params.py  |    8 +
 .../templates/hadoop-metrics2.properties.j2     |   25 +-
 .../ComponentSSLConfigurationTest.java          |    6 +-
 .../ganglia/GangliaPropertyProviderTest.java    |   26 +-
 .../GangliaReportPropertyProviderTest.java      |    2 +-
 .../timeline/AMSPropertyProviderTest.java       |   30 +-
 .../timeline/AMSReportPropertyProviderTest.java |    4 +-
 .../AMBARI_METRICS/test_metrics_collector.py    |    8 +
 .../python/stacks/2.0.6/configs/default.json    | 1146 +++++++++---------
 .../2.0.6/configs/default_ams_embedded.json     |    7 +
 64 files changed, 1113 insertions(+), 721 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index 9173889..2854898 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -24,21 +24,35 @@ import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
 
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSocketFactory;
+import javax.net.ssl.TrustManagerFactory;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.security.KeyStore;
 
 public abstract class AbstractTimelineMetricsSink {
   public static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";
   public static final String MAX_METRIC_ROW_CACHE_SIZE = "maxRowCacheSize";
   public static final String METRICS_SEND_INTERVAL = "sendInterval";
   public static final String METRICS_POST_TIMEOUT_SECONDS = "timeout";
-  public static final String COLLECTOR_HOST_PROPERTY = "collector";
-  public static final String COLLECTOR_PORT_PROPERTY = "port";
+  public static final String COLLECTOR_PROPERTY = "collector";
   public static final int DEFAULT_POST_TIMEOUT_SECONDS = 10;
   public static final String SKIP_COUNTER_TRANSFROMATION = "skipCounterDerivative";
 
+  public static final String WS_V1_TIMELINE_METRICS = "/ws/v1/timeline/metrics";
+
+  public static final String SSL_KEYSTORE_PATH_PROPERTY = "truststore.path";
+  public static final String SSL_KEYSTORE_TYPE_PROPERTY = "truststore.type";
+  public static final String SSL_KEYSTORE_PASSWORD_PROPERTY = "truststore.password";
+
+  private SSLSocketFactory sslSocketFactory;
+
   protected final Log LOG;
 
   protected static ObjectMapper mapper;
@@ -48,7 +62,7 @@ public abstract class AbstractTimelineMetricsSink {
     AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
     mapper.setAnnotationIntrospector(introspector);
     mapper.getSerializationConfig()
-        .setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
+      .withSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
   }
 
   public AbstractTimelineMetricsSink() {
@@ -59,9 +73,13 @@ public abstract class AbstractTimelineMetricsSink {
     String connectUrl = getCollectorUri();
     int timeout = getTimeoutSeconds() * 1000;
     try {
+      if (connectUrl == null) {
+        throw new IOException("Unknown URL. " +
+          "Unable to connect to metrics collector.");
+      }
       String jsonData = mapper.writeValueAsString(metrics);
-
-      HttpURLConnection connection = (HttpURLConnection) new URL(connectUrl).openConnection();
+      HttpURLConnection connection = connectUrl.startsWith("https") ?
+        getSSLConnection(connectUrl) : getConnection(connectUrl);
 
       connection.setRequestMethod("POST");
       connection.setRequestProperty("Content-Type", "application/json");
@@ -81,13 +99,75 @@ public abstract class AbstractTimelineMetricsSink {
         LOG.info("Unable to POST metrics to collector, " + connectUrl + ", " +
           "statusCode = " + statusCode);
       } else {
-        LOG.debug("Metrics posted to Collector " + connectUrl);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Metrics posted to Collector " + connectUrl);
+        }
       }
     } catch (IOException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unable to connect to collector, " + connectUrl, e);
+      } else {
+        LOG.info("Unable to connect to collector, " + connectUrl);
+      }
       throw new UnableToConnectException(e).setConnectUrl(connectUrl);
     }
   }
 
+  // Get a connection
+  protected HttpURLConnection getConnection(String spec) throws IOException {
+    return (HttpURLConnection) new URL(spec).openConnection();
+  }
+
+  // Get an ssl connection
+  protected HttpsURLConnection getSSLConnection(String spec)
+    throws IOException, IllegalStateException {
+
+    HttpsURLConnection connection = (HttpsURLConnection) (new URL(spec)
+      .openConnection());
+
+    connection.setSSLSocketFactory(sslSocketFactory);
+
+    return connection;
+  }
+
+  protected void loadTruststore(String trustStorePath, String trustStoreType,
+                                String trustStorePassword) {
+    if (sslSocketFactory == null) {
+      if (trustStorePath == null || trustStorePassword == null) {
+
+        String msg =
+          String.format("Can't load TrustStore. " +
+            "Truststore path or password is not set.");
+
+        LOG.error(msg);
+        throw new IllegalStateException(msg);
+      }
+      FileInputStream in = null;
+      try {
+        in = new FileInputStream(new File(trustStorePath));
+        KeyStore store = KeyStore.getInstance(trustStoreType == null ?
+          KeyStore.getDefaultType() : trustStoreType);
+        store.load(in, trustStorePassword.toCharArray());
+        TrustManagerFactory tmf = TrustManagerFactory
+          .getInstance(TrustManagerFactory.getDefaultAlgorithm());
+        tmf.init(store);
+        SSLContext context = SSLContext.getInstance("TLS");
+        context.init(null, tmf.getTrustManagers(), null);
+        sslSocketFactory = context.getSocketFactory();
+      } catch (Exception e) {
+        LOG.error("Unable to load TrustStore", e);
+      } finally {
+        if (in != null) {
+          try {
+            in.close();
+          } catch (IOException e) {
+            LOG.error("Unable to load TrustStore", e);
+          }
+        }
+      }
+    }
+  }
+
   abstract protected String getCollectorUri();
 
   abstract protected int getTimeoutSeconds();
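
For reference, a minimal sketch of a sink built on the reworked base class
above -- hypothetical, not part of this commit -- showing the pattern every
concrete sink below now follows: read the full URL from the single
"collector" property, append WS_V1_TIMELINE_METRICS, and bootstrap the
truststore only when the scheme is https.

    import java.util.Properties;
    import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink;

    public class ExampleTimelineMetricsSink extends AbstractTimelineMetricsSink {
      private final String collectorUri;

      public ExampleTimelineMetricsSink(Properties conf) {
        // "collector" now carries scheme://host:port, replacing the old
        // separate host ("collector") and "port" properties.
        collectorUri = conf.getProperty(COLLECTOR_PROPERTY) + WS_V1_TIMELINE_METRICS;
        if (collectorUri.toLowerCase().startsWith("https://")) {
          // One-time SSLSocketFactory setup provided by the base class.
          loadTruststore(conf.getProperty(SSL_KEYSTORE_PATH_PROPERTY),
              conf.getProperty(SSL_KEYSTORE_TYPE_PROPERTY),
              conf.getProperty(SSL_KEYSTORE_PASSWORD_PROPERTY));
        }
      }

      @Override
      protected String getCollectorUri() {
        return collectorUri;
      }

      @Override
      protected int getTimeoutSeconds() {
        return DEFAULT_POST_TIMEOUT_SECONDS;
      }
    }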

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2 b/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
index bc18043..f9b303e 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/main/conf/flume-metrics2.properties.j2
@@ -16,8 +16,7 @@
 # limitations under the License.
 #}
 
-collector={{metric_collector_host}}
-port={{metric_collector_port}}
+collector=http://localhost:6188
 collectionFrequency=60000
 maxRowCacheSize=10000
 sendInterval=59000
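
Pointed at an HTTPS collector, the same file would carry the full scheme
plus the truststore properties that FlumeTimelineMetricsSink reads below
(values illustrative, not from this commit):

    collector=https://metrics.example.com:6188
    truststore.path=/etc/security/clientKeys/all.jks
    truststore.type=jks
    truststore.password=bigdata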

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
index cf2b4ae..3040c48 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.metrics2.sink.flume;
 
-import org.apache.commons.lang.ClassUtils;
 import org.apache.commons.lang.math.NumberUtils;
 import org.apache.flume.Context;
 import org.apache.flume.FlumeException;
@@ -96,9 +95,13 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     metricsSendInterval = Integer.parseInt(configuration.getProperty(METRICS_SEND_INTERVAL,
         String.valueOf(TimelineMetricsCache.MAX_EVICTION_TIME_MILLIS)));
     metricsCaches = new HashMap<String, TimelineMetricsCache>();
-    String collectorHostname = configuration.getProperty(COLLECTOR_HOST_PROPERTY);
-    String port = configuration.getProperty(COLLECTOR_PORT_PROPERTY);
-    collectorUri = "http://" + collectorHostname + ":" + port + "/ws/v1/timeline/metrics";
+    collectorUri = configuration.getProperty(COLLECTOR_PROPERTY) + WS_V1_TIMELINE_METRICS;
+    if (collectorUri.toLowerCase().startsWith("https://")) {
+      String trustStorePath = configuration.getProperty(SSL_KEYSTORE_PATH_PROPERTY).trim();
+      String trustStoreType = configuration.getProperty(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+      String trustStorePwd = configuration.getProperty(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+      loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+    }
     pollFrequency = Long.parseLong(configuration.getProperty("collectionFrequency"));
 
     String[] metrics = configuration.getProperty(COUNTER_METRICS_PROPERTY).trim().split(",");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2 b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
index 0d13498..c819301 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2-hbase.properties.j2
@@ -31,19 +31,19 @@ hbase.extendedperiod = 3600
 # Configuration of the "hbase" context for timeline metrics service
 hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.period=10
-hbase.collector={{timeline_server_hosts}}:8188
+hbase.collector={{timeline_server_hosts}}:6188
 
 # Configuration of the "jvm" context for timeline metrics service
 jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 jvm.period=10
-jvm.collector={{timeline_server_hosts}}:8188
+jvm.collector={{timeline_server_hosts}}:6188
 
 # Configuration of the "rpc" context for timeline metrics service
 rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 rpc.period=10
-rpc.collector={{timeline_server_hosts}}:8188
+rpc.collector={{timeline_server_hosts}}:6188
 
 # Following hadoop example
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period=10
-hbase.sink.timeline.collector={{timeline_server_hosts}}:8188
\ No newline at end of file
+hbase.sink.timeline.collector=http://{{timeline_server_hosts}}:6188
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2 b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
index 76a00d1..ec1377d 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/conf/hadoop-metrics2.properties.j2
@@ -42,17 +42,17 @@
 
 
 # Hook up to the server
-datanode.sink.timeline.collector={{timeline_server_hosts}}:8188
-namenode.sink.timeline.collector={{timeline_server_hosts}}:8188
-resourcemanager.sink.timeline.collector={{timeline_server_hosts}}:8188
-nodemanager.sink.timeline.collector={{timeline_server_hosts}}:8188
-historyserver.sink.timeline.collector={{timeline_server_hosts}}:8188
-journalnode.sink.timeline.collector={{timeline_server_hosts}}:8188
-nimbus.sink.timeline.collector={{timeline_server_hosts}}:8188
-supervisor.sink.timeline.collector={{timeline_server_hosts}}:8188
-maptask.sink.timeline.collector={{timeline_server_hosts}}:8188
-reducetask.sink.timeline.collector={{timeline_server_hosts}}:8188
+datanode.sink.timeline.collector=http://localhost:6188
+namenode.sink.timeline.collector=http://localhost:6188
+resourcemanager.sink.timeline.collector=http://localhost:6188
+nodemanager.sink.timeline.collector=http://localhost:6188
+historyserver.sink.timeline.collector=http://localhost:6188
+journalnode.sink.timeline.collector=http://localhost:6188
+nimbus.sink.timeline.collector=http://localhost:6188
+supervisor.sink.timeline.collector=http://localhost:6188
+maptask.sink.timeline.collector=http://localhost:6188
+reducetask.sink.timeline.collector=http://localhost:6188
 
 resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 
-{% endif %}
\ No newline at end of file
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index 000b82e..6da9257 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -29,7 +29,6 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.lang.ClassUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -79,13 +78,19 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
     LOG.info("Identified hostname = " + hostName + ", serviceName = " + serviceName);
 
     // Load collector configs
-    metricsServers = Servers.parse(conf.getString(COLLECTOR_HOST_PROPERTY), 6188);
+    metricsServers = Servers.parse(conf.getString(COLLECTOR_PROPERTY), 6188);
 
     if (metricsServers == null || metricsServers.isEmpty()) {
       LOG.error("No Metric collector configured.");
     } else {
-      collectorUri = "http://" + conf.getString(COLLECTOR_HOST_PROPERTY).trim()
-          + "/ws/v1/timeline/metrics";
+      collectorUri = conf.getString(COLLECTOR_PROPERTY).trim()
+          + WS_V1_TIMELINE_METRICS;
+      if (collectorUri.toLowerCase().startsWith("https://")) {
+        String trustStorePath = conf.getString(SSL_KEYSTORE_PATH_PROPERTY).trim();
+        String trustStoreType = conf.getString(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+        String trustStorePwd = conf.getString(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+        loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+      }
     }
 
     LOG.info("Collector Uri: " + collectorUri);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
index 6b23f36..528384e 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/test/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSinkTest.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.metrics2.sink.timeline;
 
-import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.COLLECTOR_HOST_PROPERTY;
+import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.COLLECTOR_PROPERTY;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.MAX_METRIC_ROW_CACHE_SIZE;
 import static org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink.METRICS_SEND_INTERVAL;
 import static org.easymock.EasyMock.anyInt;
@@ -62,7 +62,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getString(eq("slave.host.name"))).andReturn("testhost").anyTimes();
     expect(conf.getParent()).andReturn(null).anyTimes();
     expect(conf.getPrefix()).andReturn("service").anyTimes();
-    expect(conf.getString(eq(COLLECTOR_HOST_PROPERTY))).andReturn("localhost:63188").anyTimes();
+    expect(conf.getString(eq(COLLECTOR_PROPERTY))).andReturn("localhost:63188").anyTimes();
     expect(conf.getString(eq("serviceName-prefix"), eq(""))).andReturn("").anyTimes();
 
     expect(conf.getInt(eq(MAX_METRIC_ROW_CACHE_SIZE), anyInt())).andReturn(10).anyTimes();
@@ -130,7 +130,7 @@ public class HadoopTimelineMetricsSinkTest {
     expect(conf.getString(eq("slave.host.name"))).andReturn("testhost").anyTimes();
     expect(conf.getParent()).andReturn(null).anyTimes();
     expect(conf.getPrefix()).andReturn("service").anyTimes();
-    expect(conf.getString(eq(COLLECTOR_HOST_PROPERTY))).andReturn("localhost:63188").anyTimes();
+    expect(conf.getString(eq(COLLECTOR_PROPERTY))).andReturn("localhost:63188").anyTimes();
     expect(conf.getString(eq("serviceName-prefix"), eq(""))).andReturn("").anyTimes();
 
     expect(conf.getInt(eq(MAX_METRIC_ROW_CACHE_SIZE), anyInt())).andReturn(10).anyTimes();

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
index d302518..5952982 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
+++ b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/metric_monitor.ini
@@ -22,6 +22,7 @@ metrics_server = localhost:{{ams_collector_port}}
 hostname = {{hostname}}
 enable_time_threshold = false
 enable_value_threshold = false
+https_enabled = false
 
 [emitter]
 send_interval = 60

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 48f5af2..d533537 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -91,6 +91,7 @@ debug_level = INFO
 metrics_server = host:port
 enable_time_threshold = false
 enable_value_threshold = false
+https_enabled = false
 
 [emitter]
 send_interval = 60
@@ -181,7 +182,7 @@ class Configuration:
 
   def get(self, section, key, default=None):
     try:
-      value = self.config.get(section, key)
+      value = str(self.config.get(section, key)).strip()
     except:
       return default
     return value
@@ -209,3 +210,6 @@ class Configuration:
 
   def get_max_queue_size(self):
     return int(self.get("collector", "max_queue_size", 5000))
+
+  def get_server_https_enabled(self):
+    return "true" == str(self.get("default", "https_enabled")).lower()

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
index 6f66093..4e39ab5 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
@@ -20,13 +20,12 @@ limitations under the License.
 
 import logging
 import threading
-import time
 import urllib2
 
 logger = logging.getLogger()
 
 class Emitter(threading.Thread):
-  COLLECTOR_URL = "http://{0}/ws/v1/timeline/metrics"
+  COLLECTOR_URL = "{0}://{1}/ws/v1/timeline/metrics"
   RETRY_SLEEP_INTERVAL = 5
   MAX_RETRY_COUNT = 3
   """
@@ -36,10 +35,12 @@ class Emitter(threading.Thread):
     threading.Thread.__init__(self)
     logger.debug('Initializing Emitter thread.')
     self.lock = threading.Lock()
-    self.collector_address = config.get_server_address()
     self.send_interval = config.get_send_interval()
     self._stop_handler = stop_handler
     self.application_metric_map = application_metric_map
+    # TODO verify certificate
+    protocol = 'https' if config.get_server_https_enabled() else 'http'
+    self.collector_url = self.COLLECTOR_URL.format(protocol, config.get_server_address())
 
   def run(self):
     logger.info('Running Emitter thread: %s' % threading.currentThread().getName())
@@ -54,7 +55,7 @@ class Emitter(threading.Thread):
         logger.info('Shutting down Emitter thread')
         return
     pass
-  
+
   def submit_metrics(self):
     retry_count = 0
     # This call will acquire lock on the map and clear contents before returning
@@ -73,7 +74,7 @@ class Emitter(threading.Thread):
       except Exception, e:
         logger.warn('Error sending metrics to server. %s' % str(e))
       pass
-  
+
       if response and response.getcode() == 200:
         retry_count = self.MAX_RETRY_COUNT
       else:
@@ -84,13 +85,12 @@ class Emitter(threading.Thread):
           return
       pass
     pass
-  
+    # TODO verify certificate
   def push_metrics(self, data):
     headers = {"Content-Type" : "application/json", "Accept" : "*/*"}
-    server = self.COLLECTOR_URL.format(self.collector_address.strip())
-    logger.info("server: %s" % server)
+    logger.info("server: %s" % self.collector_url)
     logger.debug("message to sent: %s" % data)
-    req = urllib2.Request(server, data, headers)
+    req = urllib2.Request(self.collector_url, data, headers)
     response = urllib2.urlopen(req, timeout=int(self.send_interval - 10))
     if response:
       logger.debug("POST response from server: retcode = {0}".format(response.getcode()))

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 4915435..d6d251c 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -63,11 +63,13 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private static final String TIMELINE_METRICS_MAX_ROW_CACHE_SIZE_PROPERTY = "kafka.timeline.metrics.maxRowCacheSize";
   private static final String TIMELINE_HOST_PROPERTY = "kafka.timeline.metrics.host";
   private static final String TIMELINE_PORT_PROPERTY = "kafka.timeline.metrics.port";
+  private static final String TIMELINE_PROTOCOL_PROPERTY = "kafka.timeline.metrics.protocol";
   private static final String TIMELINE_REPORTER_ENABLED_PROPERTY = "kafka.timeline.metrics.reporter.enabled";
   private static final String EXCLUDED_METRICS_PROPERTY = "external.kafka.metrics.exclude.prefix";
   private static final String INCLUDED_METRICS_PROPERTY = "external.kafka.metrics.include.prefix";
   private static final String TIMELINE_DEFAULT_HOST = "localhost";
-  private static final String TIMELINE_DEFAULT_PORT = "8188";
+  private static final String TIMELINE_DEFAULT_PORT = "6188";
+  private static final String TIMELINE_DEFAULT_PROTOCOL = "http";
 
   private boolean initialized = false;
   private boolean running = false;
@@ -117,8 +119,19 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
         int maxRowCacheSize = props.getInt(TIMELINE_METRICS_MAX_ROW_CACHE_SIZE_PROPERTY, MAX_RECS_PER_NAME_DEFAULT);
         String metricCollectorHost = props.getString(TIMELINE_HOST_PROPERTY, TIMELINE_DEFAULT_HOST);
         String metricCollectorPort = props.getString(TIMELINE_PORT_PROPERTY, TIMELINE_DEFAULT_PORT);
+        String metricCollectorProtocol = props.getString(TIMELINE_PROTOCOL_PROPERTY, TIMELINE_DEFAULT_PROTOCOL);
         setMetricsCache(new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval));
-        collectorUri = "http://" + metricCollectorHost + ":" + metricCollectorPort + "/ws/v1/timeline/metrics";
+
+        collectorUri = metricCollectorProtocol + "://" + metricCollectorHost +
+                       ":" + metricCollectorPort + WS_V1_TIMELINE_METRICS;
+
+        if (collectorUri.toLowerCase().startsWith("https://")) {
+          String trustStorePath = props.getString(SSL_KEYSTORE_PATH_PROPERTY).trim();
+          String trustStoreType = props.getString(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+          String trustStorePwd = props.getString(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+          loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+        }
+
 
         // Exclusion policy
         String excludedMetricsStr = props.getString(EXCLUDED_METRICS_PROPERTY, "");
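
Combined with the existing host/port keys, a broker pointed at an HTTPS
collector would be configured along these lines (values illustrative):

    kafka.timeline.metrics.protocol=https
    kafka.timeline.metrics.host=metrics.example.com
    kafka.timeline.metrics.port=6188
    truststore.path=/etc/security/clientKeys/all.jks
    truststore.type=jks
    truststore.password=bigdata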

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java b/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
index 6f2fc27..e0adb4b 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/test/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporterTest.java
@@ -79,7 +79,7 @@ public class KafkaTimelineMetricsReporterTest {
     properties.setProperty("kafka.timeline.metrics.sendInterval", "5900");
     properties.setProperty("kafka.timeline.metrics.maxRowCacheSize", "10000");
     properties.setProperty("kafka.timeline.metrics.host", "localhost");
-    properties.setProperty("kafka.timeline.metrics.port", "8188");
+    properties.setProperty("kafka.timeline.metrics.port", "6188");
     properties.setProperty("kafka.timeline.metrics.reporter.enabled", "true");
     properties.setProperty("external.kafka.metrics.exclude.prefix", "a.b.c");
     properties.setProperty("external.kafka.metrics.include.prefix", "a.b.c.d");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index f054f16..ab5f1e4 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -24,7 +24,6 @@ import backtype.storm.generated.TopologySummary;
 import backtype.storm.metric.IClusterReporter;
 import backtype.storm.utils.NimbusClient;
 import backtype.storm.utils.Utils;
-import org.apache.commons.lang3.ClassUtils;
 import org.apache.commons.lang3.Validate;
 import org.apache.hadoop.metrics2.sink.timeline.AbstractTimelineMetricsSink;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -40,9 +39,7 @@ import java.util.Map;
 public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   implements IClusterReporter {
 
-  public static final String COLLECTOR_HOST = "host";
-  public static final String COLLECTOR_PORT = "port";
-  public static final String METRICS_COLLECTOR = "metrics_collector";
+  public static final String METRICS_COLLECTOR_CATEGORY = "metrics_collector";
   public static final String APP_ID = "appId";
 
   private String hostname;
@@ -79,20 +76,25 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
         LOG.error("Could not identify hostname.");
         throw new RuntimeException("Could not identify hostname.", e);
       }
-      Validate.notNull(conf.get(METRICS_COLLECTOR), METRICS_COLLECTOR + " can not be null");
-      Map cf = (Map) conf.get(METRICS_COLLECTOR);
+      Validate.notNull(conf.get(METRICS_COLLECTOR_CATEGORY), METRICS_COLLECTOR_CATEGORY + " can not be null");
+      Map cf = (Map) conf.get(METRICS_COLLECTOR_CATEGORY);
       Map stormConf = Utils.readStormConfig();
       this.nimbusClient = NimbusClient.getConfiguredClient(stormConf);
-      String collectorHostname = cf.get(COLLECTOR_HOST).toString();
-      String port = cf.get(COLLECTOR_PORT).toString();
+      String collector = cf.get(COLLECTOR_PROPERTY).toString();
       timeoutSeconds = cf.get(METRICS_POST_TIMEOUT_SECONDS) != null ?
         Integer.parseInt(cf.get(METRICS_POST_TIMEOUT_SECONDS).toString()) :
         DEFAULT_POST_TIMEOUT_SECONDS;
       applicationId = cf.get(APP_ID).toString();
-      collectorUri = "http://" + collectorHostname + ":" + port + "/ws/v1/timeline/metrics";
+      collectorUri = collector + WS_V1_TIMELINE_METRICS;
+      if (collectorUri.toLowerCase().startsWith("https://")) {
+        String trustStorePath = cf.get(SSL_KEYSTORE_PATH_PROPERTY).toString().trim();
+        String trustStoreType = cf.get(SSL_KEYSTORE_TYPE_PROPERTY).toString().trim();
+        String trustStorePwd = cf.get(SSL_KEYSTORE_PASSWORD_PROPERTY).toString().trim();
+        loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+      }
     } catch (Exception e) {
-      LOG.warn("Could not initialize metrics collector, please specify host, " +
-        "port under $STORM_HOME/conf/config.yaml ", e);
+      LOG.warn("Could not initialize metrics collector, please specify " +
+        "protocol, host, port under $STORM_HOME/conf/config.yaml ", e);
     }
 
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 4208287..91c63b9 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -77,7 +77,13 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     int metricsSendInterval = Integer.parseInt(configuration.getProperty(METRICS_SEND_INTERVAL,
         String.valueOf(MAX_EVICTION_TIME_MILLIS)));
     metricsCache = new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval);
-    collectorUri = "http://" + configuration.getProperty(COLLECTOR_HOST_PROPERTY) + ":" + configuration.getProperty(COLLECTOR_PORT_PROPERTY) + "/ws/v1/timeline/metrics";
+    collectorUri = configuration.getProperty(COLLECTOR_PROPERTY) + WS_V1_TIMELINE_METRICS;
+    if (collectorUri.toLowerCase().startsWith("https://")) {
+      String trustStorePath = configuration.getProperty(SSL_KEYSTORE_PATH_PROPERTY).trim();
+      String trustStoreType = configuration.getProperty(SSL_KEYSTORE_TYPE_PROPERTY).trim();
+      String trustStorePwd = configuration.getProperty(SSL_KEYSTORE_PASSWORD_PROPERTY).trim();
+      loadTruststore(trustStorePath, trustStoreType, trustStorePwd);
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 11c162a..62a8cc3 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.service.CompositeService;
@@ -170,15 +171,15 @@ public class ApplicationHistoryServer extends CompositeService {
     String bindAddress = metricConfiguration.getWebappAddress();
     LOG.info("Instantiating AHSWebApp at " + bindAddress);
     try {
+      Configuration conf = metricConfiguration.getMetricsConf();
+      HttpConfig.Policy policy = HttpConfig.Policy.valueOf(
+        conf.get(TimelineMetricConfiguration.TIMELINE_SERVICE_HTTP_POLICY,
+          HttpConfig.Policy.HTTP_ONLY.name()));
       webApp =
           WebApps
             .$for("applicationhistory", ApplicationHistoryClientService.class,
               ahsClientService, "ws")
-            .with(getConfig())
-            .withHttpSpnegoPrincipalKey(
-              YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL)
-            .withHttpSpnegoKeytabKey(
-              YarnConfiguration.TIMELINE_SERVICE_KEYTAB)
+            .withHttpPolicy(conf, policy)
             .at(bindAddress)
             .start(new AHSWebApp(timelineStore, timelineMetricStore,
               ahsClientService));

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
index c6d84c2..0a9a513 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/loadsimulator/net/RestMetricsSender.java
@@ -37,7 +37,7 @@ public class RestMetricsSender implements MetricsSender {
 
   /**
    * Creates unconnected RestMetricsSender with endpoint configured as
-   * http://${metricsHost}:8188/ws/v1/timeline/metrics,
+   * http://${metricsHost}:6188/ws/v1/timeline/metrics,
    * where ${metricsHost} is specified by metricHost param.
    *
    * @param metricsHost the hostname that will be used to access application metrics history service.
@@ -70,10 +70,6 @@ public class RestMetricsSender implements MetricsSender {
       if (responseString.length() > 0) {
         LOG.debug("POST response from server: " + responseString);
       }
-    } catch (MalformedURLException e) {
-      LOG.error("", e);
-    } catch (ProtocolException e) {
-      LOG.error("", e);
     } catch (IOException e) {
       LOG.error("", e);
     } finally {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index cc9a2f3..3431ac8 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -205,6 +204,9 @@ public class TimelineMetricConfiguration {
   public static final String AGGREGATORS_SKIP_BLOCK_CACHE =
     "timeline.metrics.aggregators.skip.blockcache.enabled";
 
+  public static final String TIMELINE_SERVICE_HTTP_POLICY =
+    "timeline.metrics.service.http.policy";
+
   public static final String DISABLE_METRIC_METADATA_MGMT =
     "timeline.metrics.service.metadata.management.disabled";
 
@@ -262,7 +264,7 @@ public class TimelineMetricConfiguration {
   }
 
   public String getWebappAddress() {
-    String defaultHttpAddress = "0.0.0.0:8188";
+    String defaultHttpAddress = "0.0.0.0:6188";
     if (metricsConf != null) {
       return metricsConf.get(WEBAPP_HTTP_ADDRESS, defaultHttpAddress);
     }
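
The new key is consumed by ApplicationHistoryServer above and by the stack
params.py scripts later in this diff; a hypothetical ams-site entry that
switches the collector web app to TLS:

    timeline.metrics.service.http.policy=HTTPS_ONLY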

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
index a51b2ee..cb9651e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/ComponentSSLConfiguration.java
@@ -21,7 +21,7 @@ package org.apache.ambari.server.configuration;
  * Configuration for SSL communication between Ambari and 3rd party services.
  * Currently, the following services are supported with SSL communication:
  * <ul>
- * <li>Ganglia</li>
+ * <li>Ambari metrics</li>
  * </ul>
  */
 public class ComponentSSLConfiguration {
@@ -32,7 +32,7 @@ public class ComponentSSLConfiguration {
   private String truststorePath;
   private String truststorePassword;
   private String truststoreType;
-  private boolean gangliaSSL;
+  private boolean httpsEnabled;
 
   /**
    * The singleton.
@@ -60,7 +60,7 @@ public class ComponentSSLConfiguration {
     truststorePath     = configuration.getProperty(Configuration.SSL_TRUSTSTORE_PATH_KEY);
     truststorePassword = getPassword(configuration);
     truststoreType     = configuration.getProperty(Configuration.SSL_TRUSTSTORE_TYPE_KEY);
-    gangliaSSL         = Boolean.parseBoolean(configuration.getProperty(Configuration.GANGLIA_HTTPS_KEY));
+    httpsEnabled = Boolean.parseBoolean(configuration.getProperty(Configuration.AMRABI_METRICS_HTTPS_ENABLED_KEY));
   }
 
 
@@ -94,12 +94,12 @@ public class ComponentSSLConfiguration {
   }
 
   /**
-   * Indicates whether or not Ganglia is setup for SSL.
+   * Indicates whether or not Ambari Metrics is setup for SSL.
    *
-   * @return true if Ganglia is setup for SSL
+   * @return true if AMS is setup for SSL
    */
-  public boolean isGangliaSSL() {
-    return gangliaSSL;
+  public boolean isHttpsEnabled() {
+    return httpsEnabled;
   }
 
   /**
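
A short sketch of how callers use the renamed accessor (mirroring the
property-provider hunks below); the singleton accessor is assumed from the
existing class:

    ComponentSSLConfiguration sslConfig = ComponentSSLConfiguration.instance();
    // Scheme selection now keys off server.timeline.metrics.https.enabled
    // in ambari.properties rather than ganglia.https.
    String scheme = sslConfig.isHttpsEnabled() ? "https" : "http";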

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 35e9b6f..cfc2f51 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -242,7 +242,6 @@ public class Configuration {
   public static final String JAVAX_SSL_TRUSTSTORE = "javax.net.ssl.trustStore";
   public static final String JAVAX_SSL_TRUSTSTORE_PASSWORD = "javax.net.ssl.trustStorePassword";
   public static final String JAVAX_SSL_TRUSTSTORE_TYPE = "javax.net.ssl.trustStoreType";
-  public static final String GANGLIA_HTTPS_KEY = "ganglia.https";
   public static final String SRVR_TWO_WAY_SSL_PORT_DEFAULT = "8441";
   public static final String SRVR_ONE_WAY_SSL_PORT_DEFAULT = "8440";
   public static final String SRVR_CRT_NAME_DEFAULT = "ca.crt";
@@ -483,6 +482,9 @@ public class Configuration {
   private static final String DEFAULT_TIMELINE_METRICS_CACHE_HEAP_PERCENT = "15%";
   private static final String TIMELINE_METRICS_CACHE_USE_CUSTOM_SIZING_ENGINE = "server.timeline.metrics.cache.use.custom.sizing.engine";
 
+  // Timeline Metrics SSL settings
+  public static final String AMRABI_METRICS_HTTPS_ENABLED_KEY = "server.timeline.metrics.https.enabled";
+
   /**
    * Governs the use of {@link Parallel} to process {@link StageEntity}
    * instances into {@link Stage}.
@@ -921,8 +923,6 @@ public class Configuration {
       jsonObject = jsonElement.getAsJsonObject();
     } catch (FileNotFoundException e) {
       throw new IllegalArgumentException("No file " + file, e);
-    } catch (IOException ioe){
-      throw new IllegalArgumentException("Can't read file " + file, ioe);
     }
 
     return jsonObject;

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
index c62b00c..16e2cd4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProvider.java
@@ -253,7 +253,7 @@ public abstract class GangliaPropertyProvider extends MetricsPropertyProvider {
     
     URIBuilder uriBuilder = new URIBuilder();
 
-    if (configuration.isGangliaSSL()) {
+    if (configuration.isHttpsEnabled()) {
       uriBuilder.setScheme("https");
     } else {
       uriBuilder.setScheme("http");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
index e8dc7d1..80ecae6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProvider.java
@@ -209,7 +209,7 @@ public class GangliaReportPropertyProvider extends MetricsReportPropertyProvider
 
     StringBuilder sb = new StringBuilder();
 
-    if (configuration.isGangliaSSL()) {
+    if (configuration.isHttpsEnabled()) {
       sb.append("https://");
     } else {
       sb.append("http://");

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
index 3ecb520..a674371 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProvider.java
@@ -34,7 +34,6 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.TemporalInfo;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
 import org.apache.ambari.server.state.StackId;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -625,7 +624,8 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
             if (metricsRequest == null) {
               metricsRequest = new MetricsRequest(temporalInfo,
                 getAMSUriBuilder(collectorHostName,
-                  collectorPort != null ? Integer.parseInt(collectorPort) : COLLECTOR_DEFAULT_PORT),
+                  collectorPort != null ? Integer.parseInt(collectorPort) : COLLECTOR_DEFAULT_PORT,
+                  configuration.isHttpsEnabled()),
                   (String) resource.getPropertyValue(clusterNamePropertyId));
               requests.put(temporalInfo, metricsRequest);
             }
@@ -643,9 +643,9 @@ public abstract class AMSPropertyProvider extends MetricsPropertyProvider {
     return requestMap;
   }
 
-  static URIBuilder getAMSUriBuilder(String hostname, int port) {
+  static URIBuilder getAMSUriBuilder(String hostname, int port, boolean httpsEnabled) {
     URIBuilder uriBuilder = new URIBuilder();
-    uriBuilder.setScheme("http");
+    uriBuilder.setScheme(httpsEnabled ? "https" : "http");
     uriBuilder.setHost(hostname);
     uriBuilder.setPort(port);
     uriBuilder.setPath("/ws/v1/timeline/metrics");
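
Assuming package-local access (the method is package-private), the added
flag simply flips the scheme; host and port here are illustrative:

    URIBuilder builder = getAMSUriBuilder("collector.example.com", 6188, true);
    // builder.toString() -> "https://collector.example.com:6188/ws/v1/timeline/metrics"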

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
index e40a5a5..306390c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProvider.java
@@ -174,7 +174,7 @@ public class AMSReportPropertyProvider extends MetricsReportPropertyProvider {
     String host = hostProvider.getCollectorHostName(clusterName, TIMELINE_METRICS);
     String port = hostProvider.getCollectorPort(clusterName, TIMELINE_METRICS);
     URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder(host,
-      port != null ? Integer.parseInt(port) : 8188);
+      port != null ? Integer.parseInt(port) : 6188, configuration.isHttpsEnabled());
 
     for (Map.Entry<String, MetricReportRequest> entry : reportRequestMap.entrySet()) {
       MetricReportRequest reportRequest = entry.getValue();

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
index f40da62..bd9f357 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
@@ -172,6 +172,7 @@
       <configuration-dependencies>
         <config-type>accumulo-env</config-type>
         <config-type>accumulo-site</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index a9626b6..2bd2626 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -131,6 +131,13 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

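The protocol/truststore block above recurs in the params.py of every sink service touched by this patch (FLUME, HBASE, AMBARI_METRICS itself). default() is Ambari's resource_management helper that resolves a JSON path against the command configuration and returns a fallback when the key is absent; a simplified, self-contained sketch of its behaviour (the real helper reads Script.get_config() itself):

    def default(json_path, fallback, config=None):
        # walk e.g. /configurations/ams-site/timeline.metrics.service.http.policy
        node = config or {}
        for key in json_path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    # default("/configurations/ams-site/timeline.metrics.service.http.policy",
    #         "HTTP_ONLY", {"configurations": {}})  # -> "HTTP_ONLY"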
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
index 399e0c9..c2c12a5 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
@@ -35,7 +35,12 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 accumulo.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 accumulo.sink.timeline.period={{metrics_collection_period}}
 accumulo.sink.timeline.sendInterval={{metrics_report_interval}}000
-accumulo.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+accumulo.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+accumulo.sink.timeline.truststore.path = {{metric_truststore_path}}
+accumulo.sink.timeline.truststore.type = {{metric_truststore_type}}
+accumulo.sink.timeline.truststore.password = {{metric_truststore_password}}
 
 {% else %}
 

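The collector address is now rendered as one URL-style value instead of separate host and port. A quick jinja2 check of what the template line above produces (values are illustrative):

    from jinja2 import Template

    line = Template(
        "accumulo.sink.timeline.collector="
        "{{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}")
    print(line.render(metric_collector_protocol="https",
                      metric_collector_host="c6401.ambari.apache.org",
                      metric_collector_port="6188"))
    # accumulo.sink.timeline.collector=https://c6401.ambari.apache.org:6188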
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index ab9593d..08bf47c 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -484,7 +484,6 @@
       </property>
     </depends-on>
   </property>
-
   <property>
     <name>timeline.metrics.sink.report.interval</name>
     <value>60</value>
@@ -553,5 +552,16 @@
       utilization only for user queries.
     </description>
   </property>
+  <property>
+    <name>timeline.metrics.service.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      Configures the HTTP policy of the YARN Application History Server
+      embedded in the Ambari Metrics System.
+      The following values are supported:
+      - HTTP_ONLY : service is provided only over HTTP
+      - HTTPS_ONLY : service is provided only over HTTPS
+    </description>
+  </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml
new file mode 100644
index 0000000..31586ee
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-client.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>ssl.client.truststore.location</name>
+        <value>/etc/security/clientKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml
new file mode 100644
index 0000000..f95793e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-ssl-server.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>ssl.server.truststore.location</name>
+        <value>/etc/security/serverKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.reload.interval</name>
+        <value>10000</value>
+        <description>Truststore reload interval, in milliseconds.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.location</name>
+        <value>/etc/security/serverKeys/keystore.jks</value>
+        <description>Location of the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.keypassword</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password for private key in keystore file.</description>
+    </property>
+</configuration>

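For HTTPS_ONLY to work end to end, the keystore named by ssl.server.keystore.location must exist on the collector host. One way to create a self-signed keystore for testing, with paths and passwords matching the defaults above (adjust both for real deployments; production should use a CA-signed certificate):

    keytool -genkeypair -alias ams-collector -keyalg RSA -keysize 2048 \
      -validity 365 -dname "CN=c6401.ambari.apache.org" \
      -keystore /etc/security/serverKeys/keystore.jks -storetype jks \
      -storepass bigdata -keypass bigdata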
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 67a93a5..2f2f116 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -155,6 +155,8 @@
         <config-type>ams-hbase-log4j</config-type>
         <config-type>ams-grafana-env</config-type>
         <config-type>ams-grafana-ini</config-type>
+        <config-type>ams-ssl-server</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
       <excluded-config-types>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index 40188c2..417574b 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -204,6 +204,14 @@ def ams(name=None):
               group=params.user_group
     )
 
+    XmlConfig("ssl-server.xml",
+              conf_dir=params.ams_collector_conf_dir,
+              configurations=params.config['configurations']['ams-ssl-server'],
+              configuration_attributes=params.config['configuration_attributes']['ams-ssl-server'],
+              owner=params.ams_user,
+              group=params.user_group
+    )
+
     merged_ams_hbase_site = {}
     merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
     if params.security_enabled:

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
index 5ab40b0..02caa11 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
@@ -40,7 +40,7 @@ def create_ams_datasource():
   Logger.info("Connecting (GET) to %s:%s%s" % (params.hostname,
                                                params.ams_grafana_port,
                                                GRAFANA_URL))
-
+  # TODO: add https support
   conn = httplib.HTTPConnection(params.hostname,
                                 int(params.ams_grafana_port))
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 89a60f7..f3a97fc 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -50,6 +50,16 @@ ams_pid_dir = status_params.ams_collector_pid_dir
 ams_collector_script = "/usr/sbin/ambari-metrics-collector"
 ams_collector_pid_dir = status_params.ams_collector_pid_dir
 ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+  metric_collector_https_enabled = True
+  metric_collector_protocol = 'https'
+else:
+  metric_collector_https_enabled = False
+  metric_collector_protocol = 'http'
+metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
 if 'cluster-env' in config['configurations'] and \
     'metrics_collector_vip_host' in config['configurations']['cluster-env']:
   metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
index f19c823..4346f0f 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
@@ -79,8 +79,9 @@ class AMSServiceCheck(Script):
         Logger.info("Connecting (POST) to %s:%s%s" % (params.metric_collector_host,
                                                       params.metric_collector_port,
                                                       self.AMS_METRICS_POST_URL))
-        conn = httplib.HTTPConnection(params.metric_collector_host,
-                                        int(params.metric_collector_port))
+        conn = self.get_http_connection(params.metric_collector_host,
+                                        int(params.metric_collector_port),
+                                        params.metric_collector_https_enabled)
         conn.request("POST", self.AMS_METRICS_POST_URL, metric_json, headers)
 
         response = conn.getresponse()
@@ -127,8 +128,9 @@ class AMSServiceCheck(Script):
                                                  params.metric_collector_port,
                                               self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
 
-    conn = httplib.HTTPConnection(params.metric_collector_host,
-                                  int(params.metric_collector_port))
+    conn = self.get_http_connection(params.metric_collector_host,
+                                    int(params.metric_collector_port),
+                                    params.metric_collector_https_enabled)
     conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
     response = conn.getresponse()
     Logger.info("Http response: %s %s" % (response.status, response.reason))
@@ -161,6 +163,13 @@ class AMSServiceCheck(Script):
 
     Logger.info("Ambari Metrics service check is finished.")
 
+  def get_http_connection(self, host, port, https_enabled=False):
+    if https_enabled:
+      # TODO verify certificate
+      return httplib.HTTPSConnection(host, port)
+    else:
+      return httplib.HTTPConnection(host, port)
+
 if __name__ == "__main__":
   AMSServiceCheck().execute()
 

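The TODO above means the service check accepts any collector certificate. On Python 2.7.9+ the same helper could verify it with an ssl context; a sketch, where the ca_certs PEM bundle is an assumption and not part of this patch:

    import httplib
    import ssl

    def get_http_connection(host, port, https_enabled=False, ca_certs=None):
        if not https_enabled:
            return httplib.HTTPConnection(host, port)
        if ca_certs:
            # verifies the certificate chain and hostname against the bundle
            context = ssl.create_default_context(cafile=ca_certs)
        else:
            # matches the current TODO behaviour: encrypted but unverified
            context = ssl._create_unverified_context()
        return httplib.HTTPSConnection(host, port, context=context)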
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
index a65ea88..1591d82 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
@@ -55,9 +55,14 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
-hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.serviceName-prefix=ams
 
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
+
 # Switch off metrics generation on a per region basis
 *.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
 hbase.*.source.filter.exclude=*Regions*

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
index fc86a58..4e2d0f5 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
@@ -22,6 +22,7 @@ metrics_server = {{metric_collector_host}}:{{metric_collector_port}}
 hostname = {{hostname}}
 enable_time_threshold = false
 enable_value_threshold = false
+https_enabled = {{metric_collector_https_enabled}}
 
 [emitter]
 send_interval = {{metrics_report_interval}}

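On the monitor side the new https_enabled flag only has to switch the connection class; a minimal sketch of how the emitter can consume the two settings above (illustrative, not the patched emitter.py):

    import httplib

    def open_collector_connection(metrics_server, https_enabled):
        # metrics_server is "host:port", exactly as this template writes it
        host, port = metrics_server.split(':')
        cls = httplib.HTTPSConnection if https_enabled else httplib.HTTPConnection
        return cls(host, int(port))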
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
index 5e5e4a7..0396c28 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
@@ -58,6 +58,7 @@
       <configuration-dependencies>
         <config-type>flume-env</config-type>
         <config-type>flume-conf</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index 28ee36b..5ec879c 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -101,6 +101,13 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
index 753aa3f..df68242 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
@@ -16,11 +16,15 @@
 # limitations under the License.
 #}
 
-collector={{metric_collector_host}}
-port={{metric_collector_port}}
+collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 collectionFrequency={{metrics_collection_period}}000
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
 
+# HTTPS properties
+truststore.path = {{metric_truststore_path}}
+truststore.type = {{metric_truststore_type}}
+truststore.password = {{metric_truststore_password}}
+
 # Metric names having type COUNTER
 counters=EventTakeSuccessCount,EventPutSuccessCount,EventTakeAttemptCount,EventPutAttemptCount

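The Flume sink likewise moves from separate collector/port keys to a single URL-style value, which the consumer can split back apart with a standard parser, e.g.:

    from urlparse import urlparse  # urllib.parse on Python 3

    u = urlparse("https://c6401.ambari.apache.org:6188")
    print(u.scheme, u.hostname, u.port)  # https c6401.ambari.apache.org 6188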
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
index ce625eb..7a71604 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/metainfo.xml
@@ -149,6 +149,7 @@
         <config-type>yarn-client</config-type>
         <config-type>hawq-limits-env</config-type>
         <config-type>hawq-sysctl-env</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
     </service>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
index 78aca3d..057e126 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
@@ -147,6 +147,7 @@
         <config-type>ranger-hbase-audit</config-type>
         <config-type>ranger-hbase-policymgr-ssl</config-type>
         <config-type>ranger-hbase-security</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 6bbf379..6837bf1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -149,6 +149,14 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
index f74ea4c..3202605 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -66,7 +66,12 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
-hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
 
 {% else %}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
index 2df141f..5a91866 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -65,7 +65,12 @@ rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
-hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+hbase.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+# HTTPS properties
+hbase.sink.timeline.truststore.path = {{metric_truststore_path}}
+hbase.sink.timeline.truststore.type = {{metric_truststore_type}}
+hbase.sink.timeline.truststore.password = {{metric_truststore_password}}
 
 {% else %}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
index 73616ae..75d3bea 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
@@ -245,6 +245,7 @@
         <config-type>ranger-hdfs-audit</config-type>
         <config-type>ranger-hdfs-policymgr-ssl</config-type>
         <config-type>ranger-hdfs-security</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
index 6d0c8c6..704d73f 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/configuration/kafka-broker.xml
@@ -325,6 +325,26 @@
     <description>Timeline port</description>
   </property>
   <property>
+    <name>kafka.timeline.metrics.protocol</name>
+    <value>{{metric_collector_protocol}}</value>
+    <description>Timeline protocol (http or https)</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.path</name>
+    <value>{{metric_truststore_path}}</value>
+    <description>Location of the trust store file.</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.type</name>
+    <value>{{metric_truststore_type}}</value>
+    <description>Optional. Default value is "jks".</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.truststore.password</name>
+    <value>{{metric_truststore_password}}</value>
+    <description>Password to open the trust store file.</description>
+  </property>
+  <property>
     <name>kafka.timeline.metrics.reporter.sendInterval</name>
     <value>5900</value>
     <description>Timeline metrics reporter send interval</description>


[2/4] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index cb34098..3d3987b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -1,30 +1,30 @@
 {
-    "roleCommand": "SERVICE_CHECK", 
-    "clusterName": "c1", 
-    "hostname": "c6401.ambari.apache.org", 
+    "roleCommand": "SERVICE_CHECK",
+    "clusterName": "c1",
+    "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
-        "ambari_db_rca_password": "mapred", 
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
         "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip",
         "stack_version": "2.0",
-        "stack_name": "HDP", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "stack_name": "HDP",
+        "ambari_db_rca_driver": "org.postgresql.Driver",
         "jdk_name": "jdk-7u67-linux-x64.tar.gz",
-        "ambari_db_rca_username": "mapred", 
+        "ambari_db_rca_username": "mapred",
         "java_home": "/usr/jdk64/jdk1.7.0_45",
         "java_version": "8",
         "db_name": "ambari",
         "group_list": "[\"hadoop\",\"nobody\",\"users\"]",
         "user_list": "[\"hive\",\"oozie\",\"nobody\",\"ambari-qa\",\"flume\",\"hdfs\",\"storm\",\"mapred\",\"hbase\",\"tez\",\"zookeeper\",\"falcon\",\"sqoop\",\"yarn\",\"hcat\"]"
-    }, 
-    "commandType": "EXECUTION_COMMAND", 
-    "roleParams": {}, 
+    },
+    "commandType": "EXECUTION_COMMAND",
+    "roleParams": {},
     "serviceName": "HIVE",
     "role": "HIVE_SERVER",
     "commandParams": {
-        "command_timeout": "300", 
+        "command_timeout": "300",
         "service_package_folder": "OOZIE",
         "script_type": "PYTHON",
         "script": "scripts/service_check.py",
@@ -37,442 +37,443 @@
         "output_file":"HDFS_CLIENT-configs.tar.gz",
         "refresh_topology": "True"
     },
-    "taskId": 152, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "taskId": 152,
+    "public_hostname": "c6401.ambari.apache.org",
     "configurations": {
         "mapred-site": {
-            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
-            "mapreduce.cluster.administrators": " hadoop", 
-            "mapreduce.reduce.input.buffer.percent": "0.0", 
-            "mapreduce.output.fileoutputformat.compress": "false", 
-            "mapreduce.framework.name": "yarn", 
-            "mapreduce.map.speculative": "false", 
-            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
-            "yarn.app.mapreduce.am.resource.mb": "683", 
-            "mapreduce.map.java.opts": "-Xmx273m", 
-            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
-            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
-            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
-            "mapreduce.reduce.speculative": "false", 
-            "mapreduce.reduce.java.opts": "-Xmx546m", 
-            "mapreduce.am.max-attempts": "2", 
-            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
-            "mapreduce.reduce.log.level": "INFO", 
-            "mapreduce.map.sort.spill.percent": "0.7", 
-            "mapreduce.task.timeout": "300000", 
-            "mapreduce.map.memory.mb": "341", 
-            "mapreduce.task.io.sort.factor": "100", 
-            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
-            "mapreduce.reduce.memory.mb": "683", 
-            "yarn.app.mapreduce.am.log.level": "INFO", 
-            "mapreduce.map.log.level": "INFO", 
-            "mapreduce.shuffle.port": "13562", 
-            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
-            "mapreduce.map.output.compress": "false", 
-            "yarn.app.mapreduce.am.staging-dir": "/user", 
-            "mapreduce.reduce.shuffle.parallelcopies": "30", 
-            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
-            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
-            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
-            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
-            "mapreduce.task.io.sort.mb": "136", 
-            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020",
+            "mapreduce.cluster.administrators": " hadoop",
+            "mapreduce.reduce.input.buffer.percent": "0.0",
+            "mapreduce.output.fileoutputformat.compress": "false",
+            "mapreduce.framework.name": "yarn",
+            "mapreduce.map.speculative": "false",
+            "mapreduce.reduce.shuffle.merge.percent": "0.66",
+            "yarn.app.mapreduce.am.resource.mb": "683",
+            "mapreduce.map.java.opts": "-Xmx273m",
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*",
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05",
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK",
+            "mapreduce.reduce.speculative": "false",
+            "mapreduce.reduce.java.opts": "-Xmx546m",
+            "mapreduce.am.max-attempts": "2",
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+            "mapreduce.reduce.log.level": "INFO",
+            "mapreduce.map.sort.spill.percent": "0.7",
+            "mapreduce.task.timeout": "300000",
+            "mapreduce.map.memory.mb": "341",
+            "mapreduce.task.io.sort.factor": "100",
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
+            "mapreduce.reduce.memory.mb": "683",
+            "yarn.app.mapreduce.am.log.level": "INFO",
+            "mapreduce.map.log.level": "INFO",
+            "mapreduce.shuffle.port": "13562",
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
+            "mapreduce.map.output.compress": "false",
+            "yarn.app.mapreduce.am.staging-dir": "/user",
+            "mapreduce.reduce.shuffle.parallelcopies": "30",
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888",
+            "mapreduce.jobhistory.done-dir": "/mr-history/done",
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
+            "mapreduce.task.io.sort.mb": "136",
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m",
             "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
-        }, 
+        },
         "oozie-site": {
-            "oozie.service.PurgeService.purge.interval": "3600", 
-            "oozie.service.CallableQueueService.queue.size": "1000", 
+            "oozie.service.PurgeService.purge.interval": "3600",
+            "oozie.service.CallableQueueService.queue.size": "1000",
             "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd",
-            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", 
-            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ", 
-            "use.system.libpath.for.mapreduce.and.pig.jobs": "false", 
-            "oozie.db.schema.name": "oozie", 
-            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials", 
-            "oozie.service.JPAService.create.db.schema": "false", 
-            "oozie.authentication.kerberos.name.rules": "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT", 
-            "oozie.service.ActionService.executor.ext.classes": "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor", 
-            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie", 
-            "oozie.service.JPAService.jdbc.password": "asd", 
-            "oozie.service.coord.normal.default.timeout": "120", 
-            "oozie.service.AuthorizationService.security.enabled": "true", 
-            "oozie.service.JPAService.pool.max.active.conn": "10", 
-            "oozie.service.PurgeService.older.than": "30", 
-            "oozie.service.coord.push.check.requeue.interval": "30000", 
-            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", 
-            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ", 
-            "oozie.service.CallableQueueService.callable.concurrency": "3", 
-            "oozie.service.JPAService.jdbc.username": "oozie", 
-            "oozie.service.CallableQueueService.threads": "10", 
-            "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService", 
-            "oozie.systemmode": "NORMAL", 
-            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib", 
-            "oozie.services": "\n        org.apache.oozie.service.SchedulerService,\n        org.apache.oozie.service.InstrumentationService,\n        org.apache.oozie.service.CallableQueueService,\n        org.apache.oozie.service.UUIDService,\n        org.apache.oozie.service.ELService,\n        org.apache.oozie.service.AuthorizationService,\n        org.apache.oozie.service.UserGroupInformationService,\n        org.apache.oozie.service.HadoopAccessorService,\n        org.apache.oozie.service.URIHandlerService,\n        org.apache.oozie.service.MemoryLocksService,\n        org.apache.oozie.service.DagXLogInfoService,\n        org.apache.oozie.service.SchemaService,\n        org.apache.oozie.service.LiteWorkflowAppService,\n        org.apache.oozie.service.JPAService,\n        org.apache.oozie.service.StoreService,\n        org.apache.oozie.service.CoordinatorStoreService,\n        org.apache.oozie.service.SLAStoreService,\n        org.apache.oozie.service.DBLiteWorkflowStoreServic
 e,\n        org.apache.oozie.service.CallbackService,\n        org.apache.oozie.service.ActionService,\n        org.apache.oozie.service.ActionCheckerService,\n        org.apache.oozie.service.RecoveryService,\n        org.apache.oozie.service.PurgeService,\n        org.apache.oozie.service.CoordinatorEngineService,\n        org.apache.oozie.service.BundleEngineService,\n        org.apache.oozie.service.DagEngineService,\n        org.apache.oozie.service.CoordMaterializeTriggerService,\n        org.apache.oozie.service.StatusTransitService,\n        org.apache.oozie.service.PauseTransitService,\n        org.apache.oozie.service.GroupsService,\n        org.apache.oozie.service.ProxyUserService", 
-            "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", 
-            "oozie.authentication.type": "simple", 
-            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", 
+            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ",
+            "use.system.libpath.for.mapreduce.and.pig.jobs": "false",
+            "oozie.db.schema.name": "oozie",
+            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
+            "oozie.service.JPAService.create.db.schema": "false",
+            "oozie.authentication.kerberos.name.rules": "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT",
+            "oozie.service.ActionService.executor.ext.classes": "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor",
+            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie",
+            "oozie.service.JPAService.jdbc.password": "asd",
+            "oozie.service.coord.normal.default.timeout": "120",
+            "oozie.service.AuthorizationService.security.enabled": "true",
+            "oozie.service.JPAService.pool.max.active.conn": "10",
+            "oozie.service.PurgeService.older.than": "30",
+            "oozie.service.coord.push.check.requeue.interval": "30000",
+            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf",
+            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ",
+            "oozie.service.CallableQueueService.callable.concurrency": "3",
+            "oozie.service.JPAService.jdbc.username": "oozie",
+            "oozie.service.CallableQueueService.threads": "10",
+            "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
+            "oozie.systemmode": "NORMAL",
+            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib",
+            "oozie.services": "\n        org.apache.oozie.service.SchedulerService,\n        org.apache.oozie.service.InstrumentationService,\n        org.apache.oozie.service.CallableQueueService,\n        org.apache.oozie.service.UUIDService,\n        org.apache.oozie.service.ELService,\n        org.apache.oozie.service.AuthorizationService,\n        org.apache.oozie.service.UserGroupInformationService,\n        org.apache.oozie.service.HadoopAccessorService,\n        org.apache.oozie.service.URIHandlerService,\n        org.apache.oozie.service.MemoryLocksService,\n        org.apache.oozie.service.DagXLogInfoService,\n        org.apache.oozie.service.SchemaService,\n        org.apache.oozie.service.LiteWorkflowAppService,\n        org.apache.oozie.service.JPAService,\n        org.apache.oozie.service.StoreService,\n        org.apache.oozie.service.CoordinatorStoreService,\n        org.apache.oozie.service.SLAStoreService,\n        org.apache.oozie.service.DBLiteWorkflowStoreServic
 e,\n        org.apache.oozie.service.CallbackService,\n        org.apache.oozie.service.ActionService,\n        org.apache.oozie.service.ActionCheckerService,\n        org.apache.oozie.service.RecoveryService,\n        org.apache.oozie.service.PurgeService,\n        org.apache.oozie.service.CoordinatorEngineService,\n        org.apache.oozie.service.BundleEngineService,\n        org.apache.oozie.service.DagEngineService,\n        org.apache.oozie.service.CoordMaterializeTriggerService,\n        org.apache.oozie.service.StatusTransitService,\n        org.apache.oozie.service.PauseTransitService,\n        org.apache.oozie.service.GroupsService,\n        org.apache.oozie.service.ProxyUserService",
+            "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
+            "oozie.authentication.type": "simple",
+            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver",
             "oozie.system.id": "oozie-${user.name}"
-        }, 
+        },
         "storm-site": {
-            "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", 
-            "topology.workers": "1", 
-            "drpc.worker.threads": "64", 
-            "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']", 
-            "supervisor.heartbeat.frequency.secs": "5", 
-            "topology.executor.send.buffer.size": "1024", 
-            "drpc.childopts": "-Xmx768m", 
-            "nimbus.thrift.port": "6627", 
-            "storm.zookeeper.retry.intervalceiling.millis": "30000", 
-            "storm.local.dir": "/hadoop/storm", 
-            "topology.receiver.buffer.size": "8", 
-            "storm.messaging.netty.client_worker_threads": "1", 
-            "transactional.zookeeper.root": "/transactional", 
-            "drpc.request.timeout.secs": "600", 
-            "topology.skip.missing.kryo.registrations": "false", 
-            "worker.heartbeat.frequency.secs": "1", 
-            "zmq.hwm": "0", 
-            "storm.zookeeper.connection.timeout": "15000", 
-            "topology.max.error.report.per.interval": "5", 
-            "storm.messaging.netty.server_worker_threads": "1", 
-            "supervisor.worker.start.timeout.secs": "120", 
-            "zmq.threads": "1", 
-            "topology.acker.executors": "null", 
-            "storm.local.mode.zmq": "false", 
-            "topology.max.task.parallelism": "null", 
-            "storm.zookeeper.port": "2181", 
-            "nimbus.childopts": "-Xmx1024m", 
-            "worker.childopts": "-Xmx768m", 
-            "drpc.queue.size": "128", 
-            "storm.zookeeper.retry.times": "5", 
-            "nimbus.monitor.freq.secs": "10", 
-            "storm.cluster.mode": "distributed", 
-            "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", 
-            "drpc.invocations.port": "3773", 
-            "storm.zookeeper.root": "/storm", 
-            "logviewer.childopts": "-Xmx128m", 
-            "transactional.zookeeper.port": "null", 
-            "topology.worker.childopts": "null", 
-            "topology.max.spout.pending": "null", 
-            "nimbus.cleanup.inbox.freq.secs": "600", 
-            "storm.messaging.netty.min_wait_ms": "100", 
-            "nimbus.task.timeout.secs": "30", 
-            "nimbus.thrift.max_buffer_size": "1048576", 
-            "topology.sleep.spout.wait.strategy.time.ms": "1", 
-            "topology.optimize": "true", 
-            "nimbus.reassign": "true", 
-            "storm.messaging.transport": "backtype.storm.messaging.netty.Context", 
-            "logviewer.appender.name": "A1", 
-            "nimbus.host": "c6401.ambari.apache.org", 
-            "ui.port": "8744", 
-            "supervisor.slots.ports": "[6700, 6701]", 
-            "nimbus.file.copy.expiration.secs": "600", 
-            "supervisor.monitor.frequency.secs": "3", 
-            "ui.childopts": "-Xmx768m", 
-            "transactional.zookeeper.servers": "null", 
-            "zmq.linger.millis": "5000", 
-            "topology.error.throttle.interval.secs": "10", 
-            "topology.worker.shared.thread.pool.size": "4", 
-            "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib", 
-            "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", 
-            "task.heartbeat.frequency.secs": "3", 
-            "topology.transfer.buffer.size": "1024", 
-            "storm.zookeeper.session.timeout": "20000", 
-            "topology.executor.receive.buffer.size": "1024", 
-            "topology.stats.sample.rate": "0.05", 
-            "topology.fall.back.on.java.serialization": "true", 
-            "supervisor.childopts": "-Xmx256m", 
-            "topology.enable.message.timeouts": "true", 
-            "storm.messaging.netty.max_wait_ms": "1000", 
-            "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", 
-            "nimbus.supervisor.timeout.secs": "60", 
-            "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", 
-            "nimbus.inbox.jar.expiration.secs": "3600", 
-            "drpc.port": "3772", 
-            "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", 
-            "storm.zookeeper.retry.interval": "1000", 
-            "storm.messaging.netty.max_retries": "30", 
-            "topology.tick.tuple.freq.secs": "null", 
-            "supervisor.enable": "true", 
-            "nimbus.task.launch.secs": "120", 
-            "task.refresh.poll.secs": "10", 
-            "topology.message.timeout.secs": "30", 
-            "storm.messaging.netty.buffer_size": "5242880", 
-            "topology.state.synchronization.timeout.secs": "60", 
-            "supervisor.worker.timeout.secs": "30", 
-            "topology.trident.batch.emit.interval.millis": "500", 
-            "topology.builtin.metrics.bucket.size.secs": "60", 
-            "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin", 
-            "logviewer.port": "8000", 
+            "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
+            "topology.workers": "1",
+            "drpc.worker.threads": "64",
+            "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']",
+            "supervisor.heartbeat.frequency.secs": "5",
+            "topology.executor.send.buffer.size": "1024",
+            "drpc.childopts": "-Xmx768m",
+            "nimbus.thrift.port": "6627",
+            "storm.zookeeper.retry.intervalceiling.millis": "30000",
+            "storm.local.dir": "/hadoop/storm",
+            "topology.receiver.buffer.size": "8",
+            "storm.messaging.netty.client_worker_threads": "1",
+            "transactional.zookeeper.root": "/transactional",
+            "drpc.request.timeout.secs": "600",
+            "topology.skip.missing.kryo.registrations": "false",
+            "worker.heartbeat.frequency.secs": "1",
+            "zmq.hwm": "0",
+            "storm.zookeeper.connection.timeout": "15000",
+            "topology.max.error.report.per.interval": "5",
+            "storm.messaging.netty.server_worker_threads": "1",
+            "supervisor.worker.start.timeout.secs": "120",
+            "zmq.threads": "1",
+            "topology.acker.executors": "null",
+            "storm.local.mode.zmq": "false",
+            "topology.max.task.parallelism": "null",
+            "storm.zookeeper.port": "2181",
+            "nimbus.childopts": "-Xmx1024m",
+            "worker.childopts": "-Xmx768m",
+            "drpc.queue.size": "128",
+            "storm.zookeeper.retry.times": "5",
+            "nimbus.monitor.freq.secs": "10",
+            "storm.cluster.mode": "distributed",
+            "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+            "drpc.invocations.port": "3773",
+            "storm.zookeeper.root": "/storm",
+            "logviewer.childopts": "-Xmx128m",
+            "transactional.zookeeper.port": "null",
+            "topology.worker.childopts": "null",
+            "topology.max.spout.pending": "null",
+            "nimbus.cleanup.inbox.freq.secs": "600",
+            "storm.messaging.netty.min_wait_ms": "100",
+            "nimbus.task.timeout.secs": "30",
+            "nimbus.thrift.max_buffer_size": "1048576",
+            "topology.sleep.spout.wait.strategy.time.ms": "1",
+            "topology.optimize": "true",
+            "nimbus.reassign": "true",
+            "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
+            "logviewer.appender.name": "A1",
+            "nimbus.host": "c6401.ambari.apache.org",
+            "ui.port": "8744",
+            "supervisor.slots.ports": "[6700, 6701]",
+            "nimbus.file.copy.expiration.secs": "600",
+            "supervisor.monitor.frequency.secs": "3",
+            "ui.childopts": "-Xmx768m",
+            "transactional.zookeeper.servers": "null",
+            "zmq.linger.millis": "5000",
+            "topology.error.throttle.interval.secs": "10",
+            "topology.worker.shared.thread.pool.size": "4",
+            "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+            "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
+            "task.heartbeat.frequency.secs": "3",
+            "topology.transfer.buffer.size": "1024",
+            "storm.zookeeper.session.timeout": "20000",
+            "topology.executor.receive.buffer.size": "1024",
+            "topology.stats.sample.rate": "0.05",
+            "topology.fall.back.on.java.serialization": "true",
+            "supervisor.childopts": "-Xmx256m",
+            "topology.enable.message.timeouts": "true",
+            "storm.messaging.netty.max_wait_ms": "1000",
+            "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
+            "nimbus.supervisor.timeout.secs": "60",
+            "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
+            "nimbus.inbox.jar.expiration.secs": "3600",
+            "drpc.port": "3772",
+            "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
+            "storm.zookeeper.retry.interval": "1000",
+            "storm.messaging.netty.max_retries": "30",
+            "topology.tick.tuple.freq.secs": "null",
+            "supervisor.enable": "true",
+            "nimbus.task.launch.secs": "120",
+            "task.refresh.poll.secs": "10",
+            "topology.message.timeout.secs": "30",
+            "storm.messaging.netty.buffer_size": "5242880",
+            "topology.state.synchronization.timeout.secs": "60",
+            "supervisor.worker.timeout.secs": "30",
+            "topology.trident.batch.emit.interval.millis": "500",
+            "topology.builtin.metrics.bucket.size.secs": "60",
+            "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin",
+            "logviewer.port": "8000",
             "topology.debug": "false"
-        }, 
+        },
         "webhcat-site": {
-            "templeton.pig.path": "pig.tar.gz/pig/bin/pig", 
-            "templeton.exec.timeout": "60000", 
-            "templeton.override.enabled": "false", 
-            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", 
-            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181", 
-            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse", 
-            "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", 
-            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz", 
-            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar", 
-            "templeton.port": "50111", 
-            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar", 
-            "templeton.hadoop": "/usr/bin/hadoop", 
-            "templeton.hive.path": "hive.tar.gz/hive/bin/hive", 
-            "templeton.hadoop.conf.dir": "/etc/hadoop/conf", 
-            "templeton.hcat": "/usr/bin/hcat", 
+            "templeton.pig.path": "pig.tar.gz/pig/bin/pig",
+            "templeton.exec.timeout": "60000",
+            "templeton.override.enabled": "false",
+            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181",
+            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse",
+            "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz",
+            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar",
+            "templeton.port": "50111",
+            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar",
+            "templeton.hadoop": "/usr/bin/hadoop",
+            "templeton.hive.path": "hive.tar.gz/hive/bin/hive",
+            "templeton.hadoop.conf.dir": "/etc/hadoop/conf",
+            "templeton.hcat": "/usr/bin/hcat",
             "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz",
             "templeton.sqoop.archive": "hdfs:///apps/webhcat/sqoop.tar.gz"
-        }, 
+        },
         "capacity-scheduler": {
-            "yarn.scheduler.capacity.node-locality-delay": "40", 
-            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.node-locality-delay": "40",
+            "yarn.scheduler.capacity.root.capacity": "100",
             "yarn.scheduler.capacity.root.acl_administer_queue": "*",
-            "yarn.scheduler.capacity.root.queues": "default", 
-            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.queues": "default",
+            "yarn.scheduler.capacity.maximum-applications": "10000",
             "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
-            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
-            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
-            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
-            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
+            "yarn.scheduler.capacity.root.default.state": "RUNNING",
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
+            "yarn.scheduler.capacity.root.default.capacity": "100",
             "yarn.scheduler.capacity.root.default.acl_submit_applications": "*"
-        }, 
+        },
         "hdfs-site": {
-            "dfs.namenode.checkpoint.period": "21600", 
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.support.append": "true", 
+            "dfs.namenode.checkpoint.period": "21600",
+            "dfs.namenode.avoid.write.stale.datanode": "true",
+            "dfs.block.access.token.enable": "true",
+            "dfs.support.append": "true",
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
             "nfs.file.dump.dir": "/tmp/.hdfs-nfs",
             "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
             "dfs.cluster.administrators": "test_user1,test_user2 hdfs,test_group",
-            "dfs.replication": "3", 
-            "ambari.dfs.datanode.http.port": "50075", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1.0f", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.permissions.enabled": "true", 
-            "fs.checkpoint.size": "67108864", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal", 
-            "dfs.blocksize": "134217728", 
-            "dfs.datanode.max.transfer.threads": "1024", 
-            "dfs.datanode.du.reserved": "1073741824", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.namenode.handler.count": "100", 
+            "dfs.replication": "3",
+            "ambari.dfs.datanode.http.port": "50075",
+            "dfs.datanode.balance.bandwidthPerSec": "6250000",
+            "dfs.namenode.safemode.threshold-pct": "1.0f",
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+            "dfs.permissions.enabled": "true",
+            "fs.checkpoint.size": "67108864",
+            "dfs.client.read.shortcircuit": "true",
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470",
+            "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+            "dfs.blocksize": "134217728",
+            "dfs.datanode.max.transfer.threads": "1024",
+            "dfs.datanode.du.reserved": "1073741824",
+            "dfs.webhdfs.enabled": "true",
+            "dfs.namenode.handler.count": "100",
             "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary,/hadoop/hdfs/namesecondary2",
-            "fs.permissions.umask-mode": "022", 
+            "fs.permissions.umask-mode": "022",
             "dfs.datanode.http.address": "0.0.0.0:50075",
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "ambari.dfs.datanode.port": "50010", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.https.port": "50470", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.replication.max": "50", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010",
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070",
+            "dfs.blockreport.initialDelay": "120",
+            "dfs.datanode.failed.volumes.tolerated": "0",
+            "dfs.namenode.accesstime.precision": "0",
+            "ambari.dfs.datanode.port": "50010",
+            "dfs.namenode.avoid.read.stale.datanode": "true",
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+            "dfs.namenode.stale.datanode.interval": "30000",
+            "dfs.heartbeat.interval": "3",
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+            "dfs.permissions.superusergroup": "hdfs",
+            "dfs.https.port": "50470",
+            "dfs.journalnode.http-address": "0.0.0.0:8480",
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+            "dfs.datanode.data.dir.perm": "750",
+            "dfs.namenode.name.dir.restore": "true",
+            "dfs.replication.max": "50",
             "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
-        }, 
+        },
         "hbase-site": {
-            "hbase.hstore.flush.retries.number": "120", 
-            "hbase.client.keyvalue.maxsize": "10485760", 
-            "hbase.hstore.compactionThreshold": "3", 
-            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
-            "hbase.regionserver.handler.count": "60", 
-            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
-            "hbase.hregion.memstore.block.multiplier": "2", 
-            "hbase.hregion.memstore.flush.size": "134217728", 
-            "hbase.superuser": "hbase", 
-            "hbase.zookeeper.property.clientPort": "2181", 
-            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
-            "zookeeper.session.timeout": "30000", 
-            "hbase.tmp.dir": "/hadoop/hbase", 
-            "hbase.local.dir": "${hbase.tmp.dir}/local", 
-            "hbase.hregion.max.filesize": "10737418240", 
-            "hfile.block.cache.size": "0.40", 
-            "hbase.security.authentication": "simple", 
-            "hbase.defaults.for.version.skip": "true", 
-            "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org", 
-            "zookeeper.znode.parent": "/hbase-unsecure", 
+            "hbase.hstore.flush.retries.number": "120",
+            "hbase.client.keyvalue.maxsize": "10485760",
+            "hbase.hstore.compactionThreshold": "3",
+            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data",
+            "hbase.regionserver.handler.count": "60",
+            "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+            "hbase.hregion.memstore.block.multiplier": "2",
+            "hbase.hregion.memstore.flush.size": "134217728",
+            "hbase.superuser": "hbase",
+            "hbase.zookeeper.property.clientPort": "2181",
+            "hbase.regionserver.global.memstore.upperLimit": "0.4",
+            "zookeeper.session.timeout": "30000",
+            "hbase.tmp.dir": "/hadoop/hbase",
+            "hbase.local.dir": "${hbase.tmp.dir}/local",
+            "hbase.hregion.max.filesize": "10737418240",
+            "hfile.block.cache.size": "0.40",
+            "hbase.security.authentication": "simple",
+            "hbase.defaults.for.version.skip": "true",
+            "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org",
+            "zookeeper.znode.parent": "/hbase-unsecure",
             "hbase.hstore.blockingStoreFiles": "10",
             "hbase.master.port": "60000",
-            "hbase.hregion.majorcompaction": "86400000", 
-            "hbase.security.authorization": "false", 
-            "hbase.cluster.distributed": "true", 
-            "hbase.hregion.memstore.mslab.enabled": "true", 
-            "hbase.client.scanner.caching": "100", 
+            "hbase.hregion.majorcompaction": "86400000",
+            "hbase.security.authorization": "false",
+            "hbase.cluster.distributed": "true",
+            "hbase.hregion.memstore.mslab.enabled": "true",
+            "hbase.client.scanner.caching": "100",
             "hbase.zookeeper.useMulti": "true"
-        }, 
+        },
         "core-site": {
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "gluster.daemon.user": "null", 
-            "hadoop.proxyuser.oozie.groups": "users", 
-            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
-            "hadoop.proxyuser.hive.groups": "users", 
-            "hadoop.security.authentication": "simple", 
-            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "fs.AbstractFileSystem.glusterfs.impl": "null", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "fs.trash.interval": "360", 
-            "ipc.client.idlethreshold": "8000", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.security.authorization": "false", 
-            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
-            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT", 
-            "hadoop.proxyuser.hcat.groups": "users", 
-            "ipc.client.connection.maxidletime": "30000", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
+            "gluster.daemon.user": "null",
+            "hadoop.proxyuser.oozie.groups": "users",
+            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org",
+            "hadoop.proxyuser.hive.groups": "users",
+            "hadoop.security.authentication": "simple",
+            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org",
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+            "mapreduce.jobtracker.webinterface.trusted": "false",
+            "fs.AbstractFileSystem.glusterfs.impl": "null",
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020",
+            "fs.trash.interval": "360",
+            "ipc.client.idlethreshold": "8000",
+            "io.file.buffer.size": "131072",
+            "hadoop.security.authorization": "false",
+            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org",
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+            "hadoop.proxyuser.hcat.groups": "users",
+            "ipc.client.connection.maxidletime": "30000",
             "ipc.client.connect.max.retries": "50"
-        }, 
+        },
         "hive-site": {
-            "hive.enforce.sorting": "true", 
-            "javax.jdo.option.ConnectionPassword": "!`\"' 1", 
-            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
-            "hive.optimize.bucketmapjoin.sortedmerge": "true", 
-            "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", 
-            "fs.file.impl.disable.cache": "true", 
-            "hive.auto.convert.join.noconditionaltask": "true", 
-            "hive.map.aggr": "true", 
-            "hive.optimize.index.filter": "true", 
-            "hive.security.authorization.enabled": "false", 
-            "hive.optimize.reducededuplication.min.reducer": "1", 
-            "hive.optimize.bucketmapjoin": "true", 
-            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
-            "hive.mapjoin.bucket.cache.size": "10000", 
-            "hive.auto.convert.join.noconditionaltask.size": "1000000000", 
-            "hive.vectorized.execution.enabled": "false", 
-            "javax.jdo.option.ConnectionUserName": "hive", 
-            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", 
-            "hive.optimize.reducededuplication": "true", 
-            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
+            "hive.enforce.sorting": "true",
+            "javax.jdo.option.ConnectionPassword": "!`\"' 1",
+            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
+            "hive.optimize.bucketmapjoin.sortedmerge": "true",
+            "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+            "fs.file.impl.disable.cache": "true",
+            "hive.auto.convert.join.noconditionaltask": "true",
+            "hive.map.aggr": "true",
+            "hive.optimize.index.filter": "true",
+            "hive.security.authorization.enabled": "false",
+            "hive.optimize.reducededuplication.min.reducer": "1",
+            "hive.optimize.bucketmapjoin": "true",
+            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083",
+            "hive.mapjoin.bucket.cache.size": "10000",
+            "hive.auto.convert.join.noconditionaltask.size": "1000000000",
+            "hive.vectorized.execution.enabled": "false",
+            "javax.jdo.option.ConnectionUserName": "hive",
+            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
+            "hive.optimize.reducededuplication": "true",
+            "hive.metastore.warehouse.dir": "/apps/hive/warehouse",
             "hive.metastore.client.socket.timeout": "60",
-            "hive.auto.convert.join": "true", 
-            "hive.enforce.bucketing": "true", 
-            "hive.mapred.reduce.tasks.speculative.execution": "false", 
-            "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", 
-            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
-            "hive.auto.convert.sortmerge.join": "true", 
-            "fs.hdfs.impl.disable.cache": "true", 
-            "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", 
-            "ambari.hive.db.schema.name": "hive", 
-            "hive.metastore.execute.setugi": "true", 
-            "hive.auto.convert.sortmerge.join.noconditionaltask": "true", 
+            "hive.auto.convert.join": "true",
+            "hive.enforce.bucketing": "true",
+            "hive.mapred.reduce.tasks.speculative.execution": "false",
+            "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
+            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true",
+            "hive.auto.convert.sortmerge.join": "true",
+            "fs.hdfs.impl.disable.cache": "true",
+            "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
+            "ambari.hive.db.schema.name": "hive",
+            "hive.metastore.execute.setugi": "true",
+            "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
             "hive.server2.enable.doAs": "true",
             "hive.server2.authentication": "NOSASL",
             "hive.server2.transport.mode": "binary",
             "hive.optimize.mapjoin.mapreduce": "true",
             "hive.exec.scratchdir" : "/custompath/tmp/hive"
-        }, 
+        },
 		"ranger-hive-plugin-properties": {
-            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-            "common.name.for.certificate": "-", 
-            "XAAUDIT.HDFS.IS_ENABLED": "false", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-            "XAAUDIT.DB.IS_ENABLED": "true", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
-            "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-            "policy_user": "ambari-qa", 
-            "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true", 
-            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-            "ranger-hive-plugin-enabled": "No", 
-            "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver", 
-            "REPOSITORY_CONFIG_USERNAME": "hive", 
-            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-            "REPOSITORY_CONFIG_PASSWORD": "hive", 
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+            "common.name.for.certificate": "-",
+            "XAAUDIT.HDFS.IS_ENABLED": "false",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+            "XAAUDIT.DB.IS_ENABLED": "true",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+            "XAAUDIT.SOLR.IS_ENABLED": "false",
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+            "policy_user": "ambari-qa",
+            "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+            "SSL_TRUSTSTORE_PASSWORD": "changeit",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+            "ranger-hive-plugin-enabled": "No",
+            "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
+            "REPOSITORY_CONFIG_USERNAME": "hive",
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+            "REPOSITORY_CONFIG_PASSWORD": "hive",
             "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         },
 		"ranger-knox-plugin-properties": {
-            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-            "KNOX_HOME": "/usr/hdp/current/knox-server", 
-            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-            "common.name.for.certificate": "-", 
-            "XAAUDIT.HDFS.IS_ENABLED": "false", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-            "XAAUDIT.DB.IS_ENABLED": "true", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
-            "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-            "ranger-knox-plugin-enabled": "No", 
-            "policy_user": "ambari-qa", 
-            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-            "REPOSITORY_CONFIG_USERNAME": "admin", 
-            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-            "REPOSITORY_CONFIG_PASSWORD": "admin-password", 
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
+            "KNOX_HOME": "/usr/hdp/current/knox-server",
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
+            "common.name.for.certificate": "-",
+            "XAAUDIT.HDFS.IS_ENABLED": "false",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
+            "XAAUDIT.DB.IS_ENABLED": "true",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
+            "XAAUDIT.SOLR.IS_ENABLED": "false",
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
+            "ranger-knox-plugin-enabled": "No",
+            "policy_user": "ambari-qa",
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
+            "SSL_TRUSTSTORE_PASSWORD": "changeit",
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
+            "REPOSITORY_CONFIG_USERNAME": "admin",
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000",
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
+            "REPOSITORY_CONFIG_PASSWORD": "admin-password",
             "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         },
         "yarn-site": {
-            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
-            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
             "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/hadoop/yarn/local1",
-            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
-            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
-            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
-            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
-            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
-            "yarn.nodemanager.resource.memory-mb": "2048", 
-            "yarn.scheduler.minimum-allocation-mb": "683", 
-            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
-            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
-            "yarn.log-aggregation.retain-seconds": "2592000", 
-            "yarn.scheduler.maximum-allocation-mb": "2048", 
-            "yarn.log-aggregation-enable": "true", 
-            "yarn.nodemanager.address": "0.0.0.0:45454", 
-            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
-            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
-            "yarn.nodemanager.log.retain-second": "604800", 
-            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025",
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org",
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+            "yarn.nodemanager.resource.memory-mb": "2048",
+            "yarn.scheduler.minimum-allocation-mb": "683",
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050",
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030",
+            "yarn.log-aggregation.retain-seconds": "2592000",
+            "yarn.scheduler.maximum-allocation-mb": "2048",
+            "yarn.log-aggregation-enable": "true",
+            "yarn.nodemanager.address": "0.0.0.0:45454",
+            "yarn.nodemanager.container-monitor.interval-ms": "3000",
+            "yarn.nodemanager.log-aggregation.compression-type": "gz",
+            "yarn.nodemanager.log.retain-second": "604800",
+            "yarn.nodemanager.delete.debug-delay-sec": "0",
             "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/hadoop/yarn/log1",
-            "yarn.nodemanager.health-checker.interval-ms": "135000", 
-            "yarn.resourcemanager.am.max-attempts": "2", 
-            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
-            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
-            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
-            "yarn.nodemanager.vmem-check-enabled": "false", 
-            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
-            "yarn.admin.acl": "*", 
-            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
-            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
-            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
-            "yarn.acl.enable": "true", 
-            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
-            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
-            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000",
+            "yarn.resourcemanager.am.max-attempts": "2",
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs",
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
+            "yarn.nodemanager.vmem-check-enabled": "false",
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1",
+            "yarn.admin.acl": "*",
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088",
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
+            "yarn.nodemanager.linux-container-executor.group": "hadoop",
+            "yarn.acl.enable": "true",
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs",
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141",
             "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
             "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
             "yarn.http.policy": "HTTP_ONLY",
@@ -485,45 +486,45 @@
             "tez.am.am-rm.heartbeat.interval-ms.max": "250"
         },
         "yarn-env": {
-            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
-            "apptimelineserver_heapsize": "1024", 
-            "nodemanager_heapsize": "1024", 
-            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
-            "yarn_heapsize": "1024", 
-            "yarn_user": "yarn", 
-            "resourcemanager_heapsize": "1024", 
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
+            "apptimelineserver_heapsize": "1024",
+            "nodemanager_heapsize": "1024",
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
+            "yarn_heapsize": "1024",
+            "yarn_user": "yarn",
+            "resourcemanager_heapsize": "1024",
             "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
             "min_user_id": "1000",
             "is_supported_yarn_ranger": "false"
         },
         "hadoop-env": {
-            "namenode_opt_maxnewsize": "200m", 
+            "namenode_opt_maxnewsize": "200m",
             "hdfs_log_dir_prefix": "/var/log/hadoop",
-            "namenode_heapsize": "1024m", 
+            "namenode_heapsize": "1024m",
             "namenode_opt_newsize": "200m",
             "namenode_opt_permsize" : "128m",
             "namenode_opt_maxpermsize" : "256m",
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
 }/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
 $USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
 OOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
  The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
 }/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
 $USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
 OOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
  The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64",
             "hdfs_user": "hdfs",
-            "dtnode_heapsize": "1024m", 
+            "dtnode_heapsize": "1024m",
             "proxyuser_group": "users",
-            "hadoop_heapsize": "1024", 
+            "hadoop_heapsize": "1024",
             "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
-            "hcat_pid_dir": "/var/run/webhcat", 
-            "hcat_user": "hcat", 
-            "hive_ambari_database": "MySQL", 
-            "hive_hostname": "abtest-3.c.pramod-thangali.internal", 
-            "hive_metastore_port": "9083", 
-            "webhcat_user": "hcat", 
-            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
 y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}", 
-            "hive_database_name": "hive", 
-            "hive_database_type": "mysql", 
-            "hive_pid_dir": "/var/run/hive", 
-            "hive_log_dir": "/var/log/hive", 
-            "hive_user": "hive", 
-            "hcat_log_dir": "/var/log/webhcat", 
+            "hcat_pid_dir": "/var/run/webhcat",
+            "hcat_user": "hcat",
+            "hive_ambari_database": "MySQL",
+            "hive_hostname": "abtest-3.c.pramod-thangali.internal",
+            "hive_metastore_port": "9083",
+            "webhcat_user": "hcat",
+            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
 y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}",
+            "hive_database_name": "hive",
+            "hive_database_type": "mysql",
+            "hive_pid_dir": "/var/run/hive",
+            "hive_log_dir": "/var/log/hive",
+            "hive_user": "hive",
+            "hcat_log_dir": "/var/log/webhcat",
             "hive_database": "New MySQL Database"
         },
         "ranger-env": {
@@ -545,10 +546,10 @@
           "fetch_nonlocal_groups": "true"
       },
       "hbase-env": {
-            "hbase_pid_dir": "/var/run/hbase", 
-            "hbase_user": "hbase", 
-            "hbase_master_heapsize": "1024m", 
-            "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintG
 CDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms

<TRUNCATED>

[3/4] ambari git commit: AMBARI-15050 Https Support for Metrics System (dsen)

Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
index 66286b3..1970113 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/metainfo.xml
@@ -64,6 +64,7 @@
         <config-type>ranger-kafka-security</config-type>
         <config-type>zookeeper-env</config-type>
         <config-type>zoo.cfg</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <restartRequiredAfterChange>true</restartRequiredAfterChange>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
index 1c01174..830d0ce 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
@@ -74,6 +74,10 @@ def kafka(upgrade_type=None):
     if params.has_metric_collector:
       kafka_server_config['kafka.timeline.metrics.host'] = params.metric_collector_host
       kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port
+      kafka_server_config['kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
+      kafka_server_config['kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
+      kafka_server_config['kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
+      kafka_server_config['kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password
 
     kafka_data_dir = kafka_server_config['log.dirs']
     kafka_data_dirs = filter(None, kafka_data_dir.split(","))

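For reference, the four new kafka.timeline.metrics.* entries above ride alongside the existing host and port keys. A minimal sketch, with hypothetical values, of what the Kafka sink ends up reading (the actual endpoint assembly happens inside the Java sink):

    kafka_server_config = {
        'kafka.timeline.metrics.host': 'ams-collector.example.com',  # hypothetical
        'kafka.timeline.metrics.port': '6188',
        'kafka.timeline.metrics.protocol': 'https',
        'kafka.timeline.metrics.truststore.path': '/etc/security/clientKeys/all.jks',
        'kafka.timeline.metrics.truststore.type': 'jks',
        'kafka.timeline.metrics.truststore.password': 'changeit',
    }

    # The sink can rebuild its collector endpoint from these entries:
    endpoint = '%s://%s:%s' % (
        kafka_server_config['kafka.timeline.metrics.protocol'],
        kafka_server_config['kafka.timeline.metrics.host'],
        kafka_server_config['kafka.timeline.metrics.port'])
    print(endpoint)  # https://ams-collector.example.com:6188
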
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
index da76952..47af240 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
@@ -106,6 +106,10 @@ else:
 
 metric_collector_host = ""
 metric_collector_port = ""
+metric_collector_protocol = ""
+metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
 
 ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
 has_metric_collector = not len(ams_collector_hosts) == 0
@@ -125,6 +129,10 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
   pass
 # Security-related params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
@@ -274,4 +282,4 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs
-)
\ No newline at end of file
+)

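The protocol choice above keys off ams-site's timeline.metrics.service.http.policy, defaulting to plain HTTP. A standalone sketch of that mapping, with a hypothetical ams-site snippet:

    ams_site = {'timeline.metrics.service.http.policy': 'HTTPS_ONLY'}  # hypothetical

    policy = ams_site.get('timeline.metrics.service.http.policy', 'HTTP_ONLY')
    metric_collector_protocol = 'https' if policy == 'HTTPS_ONLY' else 'http'
    print(metric_collector_protocol)  # https
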
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
index 1468a2f..804374a 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/metainfo.xml
@@ -134,6 +134,7 @@
         <config-type>ranger-admin-site</config-type>
         <config-type>zookeeper-env</config-type>
         <config-type>zoo.cfg</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <quickLinksConfigurations>
         <quickLinksConfiguration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
index 38d1951..25da2a1 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
@@ -165,7 +165,14 @@ if has_metric_collector:
 
   metric_collector_report_interval = 60
   metric_collector_app_id = "nimbus"
-
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+  pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink*.jar"
@@ -296,4 +303,4 @@ HdfsResource = functools.partial(
   principal_name = hdfs_principal_name,
   hdfs_site = hdfs_site,
   default_fs = default_fs
-)
\ No newline at end of file
+)

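Worth noting: the truststore lookups default to empty strings, so a cluster without an ams-ssl-client config still renders its templates. A standalone sketch of that fallback (default() below is a stand-in for the resource_management helper of the same name):

    def default(path, fallback, _configs={}):
        # Stand-in: resolves "/configurations/<type>/<key>" against a config dict.
        parts = path.strip('/').split('/')
        return _configs.get(parts[1], {}).get(parts[2], fallback)

    # No ams-ssl-client section present -> empty string, templates still render.
    metric_truststore_path = default(
        "/configurations/ams-ssl-client/ssl.client.truststore.location", "")
    assert metric_truststore_path == ""
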
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
index 01c61bf..2c09bc3 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/config.yaml.j2
@@ -57,8 +57,12 @@ enableMetricsSink: True
 metrics_collector:
 
   reportInterval: {{metric_collector_report_interval}}
-  host: "{{metric_collector_host}}"
-  port: {{metric_collector_port}}
+  collector: "{{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}"
   appId: "{{metric_collector_app_id}}"
 
+  # HTTPS settings
+  truststore.path : "{{metric_truststore_path}}"
+  truststore.type : "{{metric_truststore_type}}"
+  truststore.password : "{{metric_truststore_password}}"
+
 {% endif %}
\ No newline at end of file

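The template now emits a single collector URI in place of separate host and port keys. A sketch of the rendering, assuming jinja2 and hypothetical parameter values:

    from jinja2 import Template

    snippet = Template('collector: "{{metric_collector_protocol}}://'
                       '{{metric_collector_host}}:{{metric_collector_port}}"')
    print(snippet.render(metric_collector_protocol='https',
                         metric_collector_host='ams-collector.example.com',
                         metric_collector_port='6188'))
    # collector: "https://ams-collector.example.com:6188"
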
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
index a5ee7a9..9acf173 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/templates/storm-metrics2.properties.j2
@@ -16,7 +16,11 @@
 # limitations under the License.
 #}
 
-collector={{metric_collector_host}}
-port={{metric_collector_port}}
+collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 maxRowCacheSize=10000
-sendInterval={{metrics_report_interval}}000
\ No newline at end of file
+sendInterval={{metrics_report_interval}}000
+
+# HTTPS properties
+truststore.path = {{metric_truststore_path}}
+truststore.type = {{metric_truststore_type}}
+truststore.password = {{metric_truststore_password}}

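The collector property likewise becomes a full URI here. A sketch of how a consumer can split it back apart (the value is hypothetical):

    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse      # Python 2, as the stack scripts use

    uri = urlparse('https://ams-collector.example.com:6188')
    print(uri.scheme)    # https
    print(uri.hostname)  # ams-collector.example.com
    print(uri.port)      # 6188
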
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
index 969c2a7..41e19a3 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/metainfo.xml
@@ -150,6 +150,7 @@
         <config-type>core-site</config-type>
         <config-type>mapred-site</config-type>
         <config-type>yarn-log4j</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <widgetsFileName>YARN_widgets.json</widgetsFileName>
       <metricsFileName>YARN_metrics.json</metricsFileName>
@@ -259,6 +260,7 @@
         <config-type>ranger-yarn-audit</config-type>
         <config-type>ranger-yarn-policymgr-ssl</config-type>
         <config-type>ranger-yarn-security</config-type>
+        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
       <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index b588f86..2a9d7c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -118,6 +118,14 @@ if has_metric_collector:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
       metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 8e729a4..f9c2164 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -73,16 +73,21 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.sendInterval={{metrics_report_interval}}000
 *.sink.timeline.slave.host.name = {{hostname}}
 
-datanode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-namenode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-resourcemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-nodemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-historyserver.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-journalnode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-nimbus.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-supervisor.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-maptask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
-reducetask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+historyserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+nimbus.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+supervisor.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
 
 resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
 

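Every daemon sink line above follows the same protocol://host:port shape. A sketch that generates a few of them with hypothetical values, mirroring what the template renders:

    daemons = ['datanode', 'namenode', 'resourcemanager', 'nodemanager']
    protocol, host, port = 'https', 'ams-collector.example.com', '6188'

    for d in daemons:
        print('%s.sink.timeline.collector=%s://%s:%s' % (d, protocol, host, port))
    # datanode.sink.timeline.collector=https://ams-collector.example.com:6188
    # ...
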
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
index 58bc47e..92a7ef5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/ComponentSSLConfigurationTest.java
@@ -29,12 +29,12 @@ import org.junit.Test;
 public class ComponentSSLConfigurationTest {
 
   public static ComponentSSLConfiguration getConfiguration(String path,
-      String pass, String type, boolean gangliaSSL) {
+      String pass, String type, boolean isSslEnabled) {
     Properties ambariProperties = new Properties();
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_PATH_KEY, path);
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_PASSWORD_KEY, pass);
     ambariProperties.setProperty(Configuration.SSL_TRUSTSTORE_TYPE_KEY, type);
-    ambariProperties.setProperty(Configuration.GANGLIA_HTTPS_KEY, Boolean.toString(gangliaSSL));
+    ambariProperties.setProperty(Configuration.AMRABI_METRICS_HTTPS_ENABLED_KEY, Boolean.toString(isSslEnabled));
 
     Configuration configuration =  new TestConfiguration(ambariProperties);
 
@@ -70,7 +70,7 @@ public class ComponentSSLConfigurationTest {
   public void testIsGangliaSSL() throws Exception {
     ComponentSSLConfiguration sslConfiguration = getConfiguration("tspath",
         "tspass", "tstype", true);
-    Assert.assertTrue(sslConfiguration.isGangliaSSL());
+    Assert.assertTrue(sslConfiguration.isHttpsEnabled());
   }
 
   private static class TestConfiguration extends Configuration {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
index b513ba5..8611e68 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaPropertyProviderTest.java
@@ -220,7 +220,7 @@ public class GangliaPropertyProviderTest {
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
 
-    String expected = (configuration.isGangliaSSL() ? "https" : "http") +
+    String expected = (configuration.isHttpsEnabled() ? "https" : "http") +
         "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPDataNode%2CHDPSlaves&h=domU-12-31-39-0E-34-E1.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1";
     Assert.assertEquals(expected, streamProvider.getLastSpec());
 
@@ -269,7 +269,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPTaskTracker,HDPSlaves");
@@ -418,7 +418,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder uriBuilder = new URIBuilder();
 
-    uriBuilder.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    uriBuilder.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     uriBuilder.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     uriBuilder.setPath("/cgi-bin/rrd.py");
     uriBuilder.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HDPSlaves,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
@@ -435,7 +435,7 @@ public class GangliaPropertyProviderTest {
         "HDPSlaves", "HDPHistoryServer", "HDPJournalNode", "HDPTaskTracker", "HDPHBaseRegionServer", "HDPNameNode"});
     List<String> hosts = Arrays.asList(new String[]{"domU-12-31-39-0E-34-E3.compute-1.internal", "domU-12-31-39-0E-34-E1.compute-1.internal",
         "domU-12-31-39-0E-34-E2.compute-1.internal"});
-    int httpsVariation = configuration.isGangliaSSL() ? 1 : 0;
+    int httpsVariation = configuration.isHttpsEnabled() ? 1 : 0;
 
     Assert.assertEquals(expected.substring(0, 66 + httpsVariation), streamProvider.getLastSpec().substring(0, 66 + httpsVariation));
     Assert.assertTrue(CollectionPresentationUtils.isStringPermutationOfCollection(streamProvider.getLastSpec().substring(66 + httpsVariation, 236 + httpsVariation), components, "%2C", 0, 0));
@@ -487,7 +487,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
     
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPFlumeServer,HDPSlaves,HDPHistoryServer,HDPJournalNode,HDPTaskTracker,HDPHBaseRegionServer,HDPNameNode");
@@ -543,7 +543,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
     
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -606,7 +606,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -653,12 +653,12 @@ public class GangliaPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    String expected = (configuration.isGangliaSSL() ? "https" : "http") +
+    String expected = (configuration.isHttpsEnabled() ? "https" : "http") +
         "://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPFlumeServer%2CHDPSlaves&h=ip-10-39-113-33.ec2.internal&m=";
 
     // Depends on hashing, string representation can be different
     List<String> components = Arrays.asList(new String[]{"HDPFlumeServer", "HDPSlaves"});
-    int httpsVariation = configuration.isGangliaSSL() ? 1 : 0;
+    int httpsVariation = configuration.isHttpsEnabled() ? 1 : 0;
 
     Assert.assertEquals(expected.substring(0, 66 + httpsVariation), streamProvider.getLastSpec().substring(0, 66 + httpsVariation));
     Assert.assertTrue(CollectionPresentationUtils.isStringPermutationOfCollection(streamProvider.getLastSpec().substring(66 + httpsVariation, 92 + httpsVariation), components, "%2C", 0, 0));
@@ -704,7 +704,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -762,7 +762,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -821,7 +821,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");
@@ -880,7 +880,7 @@ public class GangliaPropertyProviderTest {
     
     URIBuilder expectedUri = new URIBuilder();
 
-    expectedUri.setScheme((configuration.isGangliaSSL() ? "https" : "http"));
+    expectedUri.setScheme((configuration.isHttpsEnabled() ? "https" : "http"));
     expectedUri.setHost("domU-12-31-39-0E-34-E1.compute-1.internal");
     expectedUri.setPath("/cgi-bin/rrd.py");
     expectedUri.setParameter("c", "HDPFlumeServer,HDPSlaves");

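The repeated test change above boils down to a scheme toggle on the expected URL. A minimal sketch of the pattern (is_https_enabled mirrors ComponentSSLConfiguration.isHttpsEnabled()):

    def expected_spec(is_https_enabled, host, path):
        scheme = 'https' if is_https_enabled else 'http'
        return '%s://%s%s' % (scheme, host, path)

    print(expected_spec(True, 'domU-12-31-39-0E-34-E1.compute-1.internal',
                        '/cgi-bin/rrd.py'))
    # https://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py
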
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
index 0f01b24..2ec9e4f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/ganglia/GangliaReportPropertyProviderTest.java
@@ -95,7 +95,7 @@ public class GangliaReportPropertyProviderTest {
 
     Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
 
-    String expected = (configuration.isGangliaSSL() ? "https" : "http") + "://domU-12-31-39-0E-34-E1.compute-1.internal/ganglia/graph.php?g=load_report&json=1";
+    String expected = (configuration.isHttpsEnabled() ? "https" : "http") + "://domU-12-31-39-0E-34-E1.compute-1.internal/ganglia/graph.php?g=load_report&json=1";
     Assert.assertEquals(expected, streamProvider.getLastSpec());
 
     Assert.assertEquals(2, PropertyHelper.getProperties(resource).size());

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
index 1394081..3adf9f7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSPropertyProviderTest.java
@@ -215,7 +215,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
@@ -264,7 +264,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(res);
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
@@ -309,12 +309,12 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user,mem_free");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
 
-    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder2.addParameter("metricNames", "mem_free,cpu_user");
     uriBuilder2.addParameter("hostname", "h1");
     uriBuilder2.addParameter("appId", "HOST");
@@ -367,14 +367,14 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder1.addParameter("metricNames", "cpu_user,mem_free");
     uriBuilder1.addParameter("hostname", "h1");
     uriBuilder1.addParameter("appId", "HOST");
     uriBuilder1.addParameter("startTime", "1416445244701");
     uriBuilder1.addParameter("endTime", "1416448936564");
 
-    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder2.addParameter("metricNames", "mem_free,cpu_user");
     uriBuilder2.addParameter("hostname", "h1");
     uriBuilder2.addParameter("appId", "HOST");
@@ -435,7 +435,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "yarn.QueueMetrics.Queue=root.AvailableMB");
     uriBuilder.addParameter("appId", "RESOURCEMANAGER");
     uriBuilder.addParameter("startTime", "1416528759233");
@@ -484,7 +484,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "rpc.rpc.RpcQueueTimeAvgTime");
     uriBuilder.addParameter("appId", "NAMENODE");
     uriBuilder.addParameter("startTime", "1416528759233");
@@ -558,7 +558,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "regionserver.Server.totalRequestCount");
     uriBuilder.addParameter("appId", "AMS-HBASE");
     uriBuilder.addParameter("startTime", "1421694000");
@@ -631,7 +631,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "rpc.rpc.NumOpenConnections._sum");
     uriBuilder.addParameter("appId", "HBASE");
     uriBuilder.addParameter("startTime", "1429824611300");
@@ -675,7 +675,7 @@ public class AMSPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("hostname", "h1");
     uriBuilder.addParameter("appId", "HOST");
@@ -771,7 +771,7 @@ public class AMSPropertyProviderTest {
     Assert.assertNotNull(hostMetricSpec);
     Assert.assertNotNull(hostComponentMetricsSpec);
     // Verify calls
-    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder1.addParameter("metricNames", "dfs.datanode.BlocksReplicated");
     uriBuilder1.addParameter("hostname", "h1");
     uriBuilder1.addParameter("appId", "DATANODE");
@@ -779,7 +779,7 @@ public class AMSPropertyProviderTest {
     uriBuilder1.addParameter("endTime", "1416448936464");
     Assert.assertEquals(uriBuilder1.toString(), hostComponentMetricsSpec);
 
-    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder2 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder2.addParameter("metricNames", "cpu_user");
     uriBuilder2.addParameter("hostname", "h1");
     uriBuilder2.addParameter("appId", "HOST");
@@ -904,7 +904,7 @@ public class AMSPropertyProviderTest {
     Set<String> specs = streamProvider.getAllSpecs();
     Assert.assertEquals(2, specs.size());
 
-    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder1 = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     Number[][] val;
 
     for (String spec : specs) {
@@ -958,7 +958,7 @@ public class AMSPropertyProviderTest {
 
     @Override
     public String getCollectorPort(String clusterName, MetricsService service) throws SystemException {
-      return "8188";
+      return "6188";
     }
 
     @Override

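These tests now pass an SSL flag (and the collector's default port 6188) to getAMSUriBuilder. A rough Python equivalent of the builder's scheme handling (the /ws/v1/timeline/metrics path is the collector's metrics endpoint; the helper itself lives in Java):

    def ams_uri(host, port, use_ssl):
        scheme = 'https' if use_ssl else 'http'
        return '%s://%s:%d/ws/v1/timeline/metrics' % (scheme, host, port)

    print(ams_uri('localhost', 6188, False))
    # http://localhost:6188/ws/v1/timeline/metrics
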
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
index 1a3afbc..b96d45a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/timeline/AMSReportPropertyProviderTest.java
@@ -93,7 +93,7 @@ public class AMSReportPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user");
     uriBuilder.addParameter("appId", "HOST");
     uriBuilder.addParameter("startTime", "1416445244800");
@@ -136,7 +136,7 @@ public class AMSReportPropertyProviderTest {
     Resource res = resources.iterator().next();
     Map<String, Object> properties = PropertyHelper.getProperties(resources.iterator().next());
     Assert.assertNotNull(properties);
-    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 8188);
+    URIBuilder uriBuilder = AMSPropertyProvider.getAMSUriBuilder("localhost", 6188, false);
     uriBuilder.addParameter("metricNames", "cpu_user._sum");
     uriBuilder.addParameter("appId", "HOST");
     uriBuilder.addParameter("startTime", "1432033257812");

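The next hunk asserts that the collector writes an ssl-server.xml from the new ams-ssl-server config type. For reference, a sketch of the keys such a file typically carries (standard Hadoop ssl-server.xml property names; all values hypothetical):

    ams_ssl_server = {
        'ssl.server.keystore.location': '/etc/security/serverKeys/keystore.jks',
        'ssl.server.keystore.password': 'changeit',
        'ssl.server.keystore.type': 'jks',
        'ssl.server.truststore.location': '/etc/security/serverKeys/all.jks',
        'ssl.server.truststore.password': 'changeit',
        'ssl.server.truststore.type': 'jks',
    }

    for key, value in sorted(ams_ssl_server.items()):
        print('%s=%s' % (key, value))
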
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e75e52a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
index 1c83bb7..96e2286 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
@@ -131,6 +131,14 @@ class TestMetricsCollector(RMFTestCase):
                               configurations = self.getConfig()['configurations']['ams-site'],
                               configuration_attributes = self.getConfig()['configuration_attributes']['ams-hbase-site']
     )
+
+    self.assertResourceCalled('XmlConfig', 'ssl-server.xml',
+                              owner = 'ams',
+                              group = 'hadoop',
+                              conf_dir = '/etc/ambari-metrics-collector/conf',
+                              configurations = self.getConfig()['configurations']['ams-ssl-server'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['ams-ssl-server']
+    )
     merged_ams_hbase_site = {}
     merged_ams_hbase_site.update(self.getConfig()['configurations']['ams-hbase-site'])
     merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = '25'