You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by am...@apache.org on 2018/01/10 11:00:01 UTC

[ambari] branch trunk updated (4936124 -> 859d050)

This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git.


    from 4936124  Merge pull request #70 from vivekratnavel/AMBARI-22749-trunk
     new df9a638  AMBARI-22121. Create mpack for Isilon OneFS (amagyar)
     new f1758bc  AMBARI-22115. Alerts for OneFS mpack (amagyar)
     new 64bc95c  AMBARI-22236. Expression parser support for JMXServerSide alerts (amagyar)
     new b577153  AMBARI-22115. Alerts for OneFS mpack (amagyar)
     new 8fbaf8d  AMBARI-22319. Allow the same config type to belong to multiple services (amagyar)
     new 0b69fef  AMBARI-22339. Metrics for OneFS mpack (amagyar)
     new 3c90642  AMBARI-22424. Quicklinks for OneFS (amagyar)
     new e1c9e86  AMBARI-22449. Improved service/component dependency support (amagyar)
     new 145ebc0  AMBARI-22449. Improved service/component dependency support (amagyar)
     new cbfcbd6  AMBARI-22507. Ambari UI changes to support Isilon OneFS (amagyar)
     new f55b569  AMBARI-22634. Kerberos support for OneFS (amagyar)
     new 89f3bba  AMBARI-22674. ServiceAdvisor for OneFS (amagyar)
     new ebf3630  AMBARI-22449. Improved service/component dependency support (amagyar)
     new 97bd0d8  AMBARI-22121. Create mpack for Isilon OneFS (amagyar)
     new 859d050  AMBARI-22339. Metrics for OneFS mpack (amagyar)

The 15 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../main/python/ambari_agent/alerts/ams_alert.py   |   4 +-
 .../main/python/ambari_agent/alerts/base_alert.py  |  47 +--
 .../python/ambari_agent/alerts/metric_alert.py     |   3 +-
 .../main/python/ambari_agent/alerts/port_alert.py  |   4 +-
 .../main/python/ambari_agent/alerts/web_alert.py   |   6 +-
 .../src/main/python/ambari_commons/inet_utils.py   |  46 +++
 ambari-server/docs/configuration/index.md          |   1 +
 .../ambari/server/alerts/JmxServerSideAlert.java   |  98 +++++
 .../org/apache/ambari/server/alerts/Threshold.java |  73 ++++
 .../ambari/server/configuration/Configuration.java |  11 +
 .../controller/AmbariManagementControllerImpl.java |   8 +-
 .../ambari/server/controller/ControllerModule.java |   1 +
 .../controller/StackServiceComponentResponse.java  |  10 +
 .../internal/OverriddenMetricsHostProvider.java    | 101 ++++++
 .../ambari/server/controller/internal/Stack.java   |  14 +
 .../internal/StackDefinedPropertyProvider.java     |   9 +-
 .../StackServiceComponentResourceProvider.java     |   9 +-
 .../server/controller/jmx/JMXMetricHolder.java     |  64 ++++
 .../controller/metrics/MetricHostProvider.java     |   9 +
 .../timeline/AMSComponentPropertyProvider.java     |   4 +-
 .../java/org/apache/ambari/server/state/Alert.java |   1 +
 .../apache/ambari/server/state/ComponentInfo.java  |  15 +
 .../apache/ambari/server/state/ConfigHelper.java   |   7 +-
 .../ambari/server/state/alert/AlertDefinition.java |  13 +
 .../apache/ambari/server/state/alert/AlertUri.java |  41 +++
 .../ambari/server/state/alert/MetricSource.java    |  66 +++-
 .../ambari/server/state/alert/Reporting.java       |  38 ++
 .../ambari/server/state/alert/ServerSource.java    |  15 +
 .../ambari/server/state/cluster/ClusterImpl.java   |  79 ++--
 .../ambari/server/state/quicklinks/Host.java       |  75 ++++
 .../ambari/server/state/quicklinks/Link.java       |  13 +
 .../state/services/AmbariServerAlertService.java   |   7 +-
 .../server/state/stack/MetricDefinition.java       |   9 +-
 .../ambari/server/topology/AmbariContext.java      |   7 +-
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml    |   1 +
 .../common-services/HDFS/3.0.0.3.0/metainfo.xml    |   1 +
 .../apache/ambari/server/alerts/ThresholdTest.java |  90 +++++
 .../AlertDefinitionResourceProviderTest.java       |   4 +-
 .../OverriddenMetricsHostProviderTest.java         |  95 +++++
 .../server/controller/jmx/JMXMetricHolderTest.java |  75 ++++
 .../stack/QuickLinksConfigurationModuleTest.java   |   3 +
 .../ambari/server/state/alert/AlertUriTest.java    |  60 +++
 .../ambari/server/state/alert/JmxInfoTest.java     |  68 ++++
 .../ambari/server/state/cluster/ClusterTest.java   |   4 +
 .../ambari/server/topology/AmbariContextTest.java  |   5 +-
 .../resources/child_quicklinks_to_override.json    |   4 +
 .../main/admin/kerberos/step1_controller.js        | 105 +++---
 ambari-web/app/controllers/main/host/details.js    |   8 +-
 .../app/controllers/wizard/step4_controller.js     | 128 ++++---
 .../app/controllers/wizard/step8_controller.js     |   8 +-
 ambari-web/app/mappers/stack_service_mapper.js     |   4 +
 ambari-web/app/messages.js                         |   7 +-
 ambari-web/app/models/stack_service.js             | 103 ++++++
 ambari-web/app/models/stack_service_component.js   |  33 ++
 .../app/templates/main/admin/kerberos/step1.hbs    |  14 +-
 ambari-web/app/templates/wizard/step4.hbs          |  58 ++-
 .../app/views/common/quick_view_link_view.js       |  64 +++-
 .../test/controllers/main/host/details_test.js     |  23 +-
 ambari-web/test/controllers/wizard/step4_test.js   |  45 ++-
 ambari-web/test/controllers/wizard/step8_test.js   |  41 ++-
 .../test/views/common/quick_link_view_test.js      |  15 +-
 .../.gitignore                                     |   0
 .../management-packs/isilon-onefs-mpack/pom.xml    | 133 +++++++
 .../src/main/assemblies/isilon-onefs-mpack.xml     |  40 ++
 .../addon-services/ONEFS/1.0.0/alerts.json         | 122 +++++++
 .../ONEFS/1.0.0/configuration/core-site.xml        | 100 +++++
 .../ONEFS/1.0.0/configuration/hadoop-env.xml       | 404 +++++++++++++++++++++
 .../ONEFS/1.0.0/configuration/hdfs-site.xml        |  65 ++++
 .../ONEFS/1.0.0/configuration/onefs.xml            |  48 +--
 .../addon-services/ONEFS/1.0.0/kerberos.json       |  73 ++++
 .../addon-services/ONEFS/1.0.0/metainfo.xml        | 111 ++++++
 .../addon-services/ONEFS/1.0.0/metrics.json        | 230 ++++++++++++
 .../ONEFS/1.0.0}/package/scripts/__init__.py       |   0
 .../ONEFS/1.0.0/package/scripts/onefs_client.py    |  63 ++++
 .../ONEFS/1.0.0}/package/scripts/params.py         |   0
 .../ONEFS/1.0.0/package/scripts/params_linux.py    |  70 ++++
 .../ONEFS/1.0.0/package/scripts/params_windows.py  |  83 +++++
 .../ONEFS/1.0.0/package/scripts/service_check.py   |  59 +++
 .../ONEFS/1.0.0}/package/scripts/status_params.py  |   0
 .../ONEFS/1.0.0/quicklinks/quicklinks.json         |  31 ++
 .../addon-services/ONEFS/1.0.0/service_advisor.py  | 122 +++++++
 .../addon-services/ONEFS/1.0.0/themes/theme.json   |  60 +++
 .../addon-services/ONEFS/1.0.0/widgets.json        | 141 +++++++
 .../src/main/resources/mpack.json                  |  28 ++
 .../src/test/python/TestServiceAdvisor.py          |  67 ++++
 .../src/test/python/unitTests.py                   | 132 +++++++
 86 files changed, 3815 insertions(+), 341 deletions(-)
 create mode 100644 ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java
 create mode 100644 ambari-server/src/main/java/org/apache/ambari/server/alerts/Threshold.java
 create mode 100644 ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProvider.java
 create mode 100644 ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Host.java
 create mode 100644 ambari-server/src/test/java/org/apache/ambari/server/alerts/ThresholdTest.java
 create mode 100644 ambari-server/src/test/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProviderTest.java
 create mode 100644 ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java
 create mode 100644 ambari-server/src/test/java/org/apache/ambari/server/state/alert/AlertUriTest.java
 create mode 100644 ambari-server/src/test/java/org/apache/ambari/server/state/alert/JmxInfoTest.java
 copy contrib/management-packs/{hdf-ambari-mpack => isilon-onefs-mpack}/.gitignore (100%)
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/pom.xml
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
 copy ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java => contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/onefs.xml (54%)
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/kerberos.json
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
 copy contrib/management-packs/{odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN => isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0}/package/scripts/__init__.py (100%)
 mode change 100755 => 100644
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
 copy {ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0 => contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0}/package/scripts/params.py (100%)
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
 copy {ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0 => contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0}/package/scripts/status_params.py (100%)
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/quicklinks/quicklinks.json
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/service_advisor.py
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/themes/theme.json
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/test/python/TestServiceAdvisor.py
 create mode 100644 contrib/management-packs/isilon-onefs-mpack/src/test/python/unitTests.py

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 04/15: AMBARI-22115. Alerts for OneFS mpack (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit b57715386cfba47be094e1cc13b31747ed51d3fa
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Thu Oct 26 11:49:18 2017 +0200

    AMBARI-22115. Alerts for OneFS mpack (amagyar)
---
 .../server/controller/jmx/JMXMetricHolder.java     |  55 ++++-
 .../ambari/server/state/alert/Reporting.java       |   9 +-
 .../server/controller/jmx/JMXMetricHolderTest.java |  14 ++
 .../addon-services/ONEFS/1.0.0/alerts.json         | 112 +++++++--
 .../ONEFS/1.0.0/configuration/hadoop-env.xml       |  18 ++
 .../addon-services/ONEFS/1.0.0/metainfo.xml        |   1 +
 .../addon-services/ONEFS/1.0.0/metrics.json        | 256 +++++++++++++++++----
 .../ONEFS/1.0.0/package/scripts/params_linux.py    |   2 +-
 .../addon-services/ONEFS/1.0.0/widgets.json        | 191 ++++++++++++++-
 9 files changed, 565 insertions(+), 93 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
index f6ae54f..53227e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
@@ -22,8 +22,11 @@ import static java.util.stream.Collectors.toList;
 
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.annotation.Nullable;
 
 /**
  *
@@ -61,17 +64,51 @@ public final class JMXMetricHolder {
       .collect(toList());
   }
 
-  public Optional<Object> find(String property) {
-    String propertyName = property.split("/")[0];
-    String propertyValue = property.split("/")[1];
+  public Optional<Object> find(String pattern) {
+    JmxPattern jmxPattern = JmxPattern.parse(pattern);
     return beans.stream()
-      .filter(each -> propertyName.equals(name(each)))
-      .map(each -> each.get(propertyValue))
-      .filter(Objects::nonNull)
+      .map(jmxPattern::extract)
+      .filter(Optional::isPresent)
+      .map(Optional::get)
       .findFirst();
   }
 
-  private String name(Map<String, Object> bean) {
-    return bean.containsKey(NAME_KEY) ? (String) bean.get(NAME_KEY) : null;
+  private static class JmxPattern {
+    private static final Pattern PATTERN = Pattern.compile("(.*?)\\[(\\S+?)\\]");
+    private final String beanName;
+    private final String propertyName;
+    private final @Nullable String key;
+
+    public static JmxPattern parse(String property) {
+      String beanName = property.split("/")[0];
+      String propertyName = property.split("/")[1];
+      String key = null;
+      Matcher matcher = PATTERN.matcher(propertyName);
+      if (matcher.matches()) {
+        propertyName = matcher.group(1);
+        key = matcher.group(2);
+      }
+      return new JmxPattern(beanName, propertyName, key);
+    }
+
+    private JmxPattern(String beanName, String propertyName, String key) {
+      this.beanName = beanName;
+      this.propertyName = propertyName;
+      this.key = key;
+    }
+
+    public Optional<Object> extract(Map<String, Object> bean) {
+      return beanName.equals(name(bean))
+        ? Optional.ofNullable(lookupByKey(bean.get(propertyName)))
+        : Optional.empty();
+    }
+
+    public Object lookupByKey(Object bean) {
+      return key != null && bean instanceof Map ? ((Map) bean).get(key) : bean;
+    }
+
+    private String name(Map<String, Object> bean) {
+      return bean.containsKey(NAME_KEY) ? (String) bean.get(NAME_KEY) : null;
+    }
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
index a7e11e1..66d09a5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
@@ -17,9 +17,8 @@
  */
 package org.apache.ambari.server.state.alert;
 
-import static org.apache.ambari.server.state.alert.Reporting.ReportingType.PERCENT;
-
 import java.text.MessageFormat;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.ambari.server.alerts.Threshold;
@@ -208,7 +207,7 @@ public class Reporting {
   }
 
   public AlertState state(double value) {
-    return getThreshold().state(PERCENT == getType() ? value * 100 : value);
+    return getThreshold().state(value);
   }
 
   private Threshold getThreshold() {
@@ -216,7 +215,9 @@ public class Reporting {
   }
 
   public String formatMessage(double value, List<Object> args) {
-    return MessageFormat.format(message(value), args.toArray());
+    List<Object> copy = new ArrayList<>(args);
+    copy.add(value);
+    return MessageFormat.format(message(value), copy.toArray());
   }
 
   private String message(double value) {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java
index 61d5bae..99eb961 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java
@@ -42,6 +42,13 @@ public class JMXMetricHolderTest {
       new HashMap<String, Object>() {{
         put("name", "bean2");
         put("value", "val2");
+      }},
+      new HashMap<String, Object>() {{
+        put("name", "nested");
+        put("value", new HashMap<String, Object>() {{
+          put("key1", "nested-val1");
+          put("key2", "nested-val2");
+        }});
       }}
     ));
   }
@@ -58,4 +65,11 @@ public class JMXMetricHolderTest {
     List<Object> result = metrics.findAll(asList("bean1/value", "bean2/value", "bean3/notfound"));
     assertThat(result, hasItems("val1", "val2"));
   }
+
+
+  @Test
+  public void testFindNestedBean() throws Exception {
+    List<Object> result = metrics.findAll(asList("nested/value[key1]", "nested/value[key2]"));
+    assertThat(result, hasItems("nested-val1", "nested-val2"));
+  }
 }
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
index 5718721..c84a5be 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
@@ -2,10 +2,10 @@
   "ONEFS":{
     "service": [
       {
-        "name": "onefs_namenode_cpu",
-        "label": "OneFS NameNode Host CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 1,
+        "name": "namenode_onefs_capacity_utilization",
+        "label": "OneFS Capacity Utilization",
+        "description": "This service-level alert is triggered if the OneFS capacity utilization exceeds the configured warning and critical thresholds. It checks the OneFS NameNode for the CapacityUsed and CapacityRemaining properties. The threshold values are in percent.",
+        "interval": 2,
         "scope": "HOST",
         "enabled": true,
         "source": {
@@ -20,26 +20,102 @@
           },
           "reporting": {
             "ok": {
-              "text": "{1} CPU, load {0,number,percent}"
+              "text": "Capacity Used:[{2,number,#.##}%, {0}], Capacity Remaining:[{1}]"
             },
             "warning": {
-              "text": "{1} CPU, load {0,number,percent}",
-              "value": 5
+              "text": "Capacity Used:[{2,number,#.##}%, {0}], Capacity Remaining:[{1}]",
+              "value": 75
             },
             "critical": {
-              "text": "{1} CPU, load {0,number,percent}",
-              "value": 20
+              "text": "Capacity Used:[{2,number,#.##}%, {0}], Capacity Remaining:[{1}]",
+              "value": 80
+            }
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystem/CapacityUsed",
+              "Hadoop:service=NameNode,name=FSNamesystem/CapacityRemaining"
+            ],
+            "value": "new Double({0})/({0} + {1}) * 100.0"
+          }
+        }
+      },
+      {
+        "name": "namenode_onefs_blocks_health",
+        "label": "OneFS NameNode Blocks Health",
+        "description": "This service-level alert is triggered if the number of corrupt or missing blocks exceeds the configured critical threshold. The threshold values are in blocks.",
+        "interval": 2,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SERVER",
+          "class": "org.apache.ambari.server.alerts.JmxServerSideAlert",
+          "uri": {
+            "http": "${hdfs-site/dfs.namenode.http-address}",
+            "https": "${hdfs-site/dfs.namenode.https-address}",
+            "https_property": "${hdfs-site/dfs.http.policy}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "Missing Blocks:[{0}]"
+            },
+            "warning": {
+              "text": "Missing Blocks:[{0}]",
+              "value": 1
+            },
+            "critical": {
+              "text": "Missing Blocks:[{0}]",
+              "value": 1
+            }
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystem/MissingBlocks"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "onefs_heap_usage",
+        "label": "OneFS Heap Usage",
+        "description": "This host-level alert is triggered if heap usage goes past thresholds on the DataNode. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SERVER",
+          "class": "org.apache.ambari.server.alerts.JmxServerSideAlert",
+          "uri": {
+            "http": "${hdfs-site/dfs.namenode.http-address}",
+            "https": "${hdfs-site/dfs.namenode.https-address}",
+            "https_property": "${hdfs-site/dfs.http.policy}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "Used Heap:[{2,number,#.##}%, {0} B], Max Heap: {1} B"
+            },
+            "warning": {
+              "text": "Used Heap:[{2,number,#.##}%, {0} B], Max Heap: {1} B",
+              "value": 80
             },
-            "units" : "%",
-            "type": "PERCENT"
-           },
-           "jmx": {
-             "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-             ]
-           }
+            "critical": {
+              "text": "Used Heap:[{2,number,#.##}%, {0} B], Max Heap: {1} B",
+              "value": 90
+            }
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=Memory/HeapMemoryUsage[used]",
+              "java.lang:type=Memory/HeapMemoryUsage[max]"
+            ],
+            "value": "100.0 - (new Double({1} - {0})/{1} * 100.0)"
           }
+        }
       }
     ]
   }
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
index bb671cc..5558f59 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
@@ -193,6 +193,24 @@
     <value-attributes>
       <type>user</type>
       <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>hadoop-env</type>
+          <name>hdfs_group</name>
+        </property>
+      </user-groups>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_group</name>
+    <display-name>HDFS User Group</display-name>
+    <value>hdfs_group</value>
+    <property-type>GROUP</property-type>
+    <description>HDFS user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
index f20bcf8..82e2022 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
@@ -27,6 +27,7 @@
       <components>
        <component>
           <name>ONEFS_CLIENT</name>
+          <timelineAppid>NAMENODE</timelineAppid>
           <displayName>OneFS Client</displayName>
           <category>CLIENT</category>
           <componentType>HCFS_CLIENT</componentType>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
index cd705eb..335bfba 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
@@ -1,55 +1,211 @@
 {
-  "ONEFS_CLIENT": {
-    "Component": [
+  "ONEFS_CLIENT" : {
+    "Component" : [
       {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/onefs/demo/counter": {
-              "metric": "onefs.demo.counter",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "properties": {
-          "url_config_type": "hdfs-site",
-          "url_property_name": "dfs.namenode.http-address"
-        },
-        "metrics": {
-          "default": {
-            "metrics/dfs/namenode/Used": {
-              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
+        "type" : "ganglia",
+        "metrics" : {
+          "default" : {
+            "metrics/load/load_five" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "load_five",
+              "amsHostMetric" : true
+            },
+            "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "jvm.JvmMetrics.GcCountConcurrentMarkSweep"
+            },
+            "metrics/cpu/cpu_nice" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "cpu_nice",
+              "amsHostMetric" : true
+            },
+            "metrics/rpc/NumOpenConnections" : {
+              "temporal" : true,
+              "pointInTime" : false,
+              "metric" : "rpc.rpc.NumOpenConnections"
+            },
+            "metrics/network/bytes_in" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "bytes_in",
+              "amsHostMetric" : true
+            },
+            "metrics/cpu/cpu_wio" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "cpu_wio",
+              "amsHostMetric" : true
+            },
+            "metrics/process/proc_run" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "proc_run",
+              "amsHostMetric" : true
+            },
+            "metrics/memory/mem_cached" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "mem_cached",
+              "amsHostMetric" : true
+            },
+            "metrics/disk/write_bytes" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "write_bytes",
+              "amsHostMetric" : true
+            },
+            "metrics/network/bytes_out" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "bytes_out",
+              "amsHostMetric" : true
+            },
+            "metrics/cpu/cpu_idle" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "cpu_idle",
+              "amsHostMetric" : true
+            },
+            "metrics/memory/mem_total" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "mem_total",
+              "amsHostMetric" : true
+            },
+            "metrics/jvm/memHeapUsedM" : {
+              "temporal" : true,
+              "pointInTime" : false,
+              "metric" : "jvm.JvmMetrics.MemHeapUsedM",
+              "unit" : "MB"
+            },
+            "metrics/memory/mem_shared" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "mem_shared",
+              "amsHostMetric" : true
+            },
+            "metrics/disk/write_count" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "write_count",
+              "amsHostMetric" : true
+            },
+            "metrics/network/pkts_in" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "pkts_in",
+              "amsHostMetric" : true
+            },
+            "metrics/network/pkts_out" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "pkts_out",
+              "amsHostMetric" : true
+            },
+            "metrics/load/load_one" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "load_one",
+              "amsHostMetric" : true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time" : {
+              "temporal" : true,
+              "pointInTime" : false,
+              "metric" : "rpc.rpc.RpcProcessingTimeAvgTime"
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep"
+            },
+            "metrics/memory/swap_total" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "swap_total",
+              "amsHostMetric" : true
+            },
+            "metrics/disk/read_bytes" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "read_bytes",
+              "amsHostMetric" : true
+            },
+            "metrics/jvm/gcTimeMillis" : {
+              "temporal" : true,
+              "pointInTime" : false,
+              "metric" : "jvm.JvmMetrics.GcTimeMillis"
+            },
+            "metrics/jvm/gcCount" : {
+              "temporal" : true,
+              "pointInTime" : false,
+              "metric" : "jvm.JvmMetrics.GcCount"
+            },
+            "metrics/memory/swap_free" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "swap_free",
+              "amsHostMetric" : true
+            },
+            "metrics/process/proc_total" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "proc_total",
+              "amsHostMetric" : true
+            },
+            "metrics/load/load_fifteen" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "load_fifteen",
+              "amsHostMetric" : true
+            },
+            "metrics/cpu/cpu_user" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "cpu_user",
+              "amsHostMetric" : true
+            },
+            "metrics/jvm/memHeapCommittedM" : {
+              "temporal" : true,
+              "pointInTime" : false,
+              "metric" : "jvm.JvmMetrics.MemHeapCommittedM"
+            },
+            "metrics/disk/disk_total" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "disk_total",
+              "amsHostMetric" : true
+            },
+            "metrics/disk/disk_free" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "disk_free",
+              "amsHostMetric" : true
+            },
+            "metrics/disk/read_count" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "read_count",
+              "amsHostMetric" : true
+            },
+            "metrics/memory/mem_free" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "mem_free",
+              "amsHostMetric" : true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time" : {
+              "temporal" : true,
+              "pointInTime" : false,
+              "metric" : "rpc.rpc.RpcQueueTimeAvgTime"
+            },
+            "metrics/cpu/cpu_system" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "cpu_system",
+              "amsHostMetric" : true
             }
           }
         }
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
index 72e0ae9..b823fc5 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
@@ -30,7 +30,7 @@ from resource_management.libraries.functions import stack_select
 config = Script.get_config()
 
 hostname = config["hostname"]
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 user_group = config['configurations']['cluster-env']['user_group']
 hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
index 23da1d6..dbde93b 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
@@ -1,29 +1,173 @@
 {
   "layouts": [
     {
-      "layout_name": "default_onefs_layout",
+      "layout_name": "default_onefs_dashboard",
       "display_name": "Standard ONEFS Dashboard",
       "section_name": "ONEFS_SUMMARY",
       "widgetLayoutInfo": [
         {
-          "widget_name": "OneFS metrics demo",
-          "description": "Test widget",
-          "default_section_name": "ONEFS_SUMMARY",
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
           "widget_type": "GRAPH",
           "is_visible": true,
-          "scope" : "SERVICE",
           "metrics": [
             {
-              "name": "counter",
-              "metric_path": "metrics/onefs/demo/counter",
+              "name": "jvm.JvmMetrics.GcCount._rate",
+              "metric_path": "metrics/jvm/gcCount._rate",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLUSTER"
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount._rate}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
             }
           ],
           "values": [
             {
-              "name": "demo value",
-              "values" : "${counter}"
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on the NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free)/mem_total) * 100}"
             }
           ],
           "properties": {
@@ -31,8 +175,33 @@
             "time_range": "1",
             "display_unit": "%"
           }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number of file blocks that do not meet the replication factor criteria. It is indicative of bad ONEFS health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLIENT",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
         }
       ]
     }
   ]
-}
\ No newline at end of file
+}

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 05/15: AMBARI-22319. Allow the same config type to belong to multiple services (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 8fbaf8dc5683c8f882968add77c5f5025af226ce
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Mon Oct 30 11:34:27 2017 +0100

    AMBARI-22319. Allow the same config type to belong to multiple services (amagyar)
---
 .../controller/AmbariManagementControllerImpl.java |  8 +--
 .../ambari/server/controller/internal/Stack.java   | 14 ++++
 .../apache/ambari/server/state/ConfigHelper.java   |  2 +-
 .../ambari/server/state/cluster/ClusterImpl.java   | 79 ++++++++--------------
 .../ambari/server/topology/AmbariContext.java      |  7 +-
 .../ambari/server/state/cluster/ClusterTest.java   |  4 ++
 .../ambari/server/topology/AmbariContextTest.java  |  5 +-
 7 files changed, 58 insertions(+), 61 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 198b617..a2d8494 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -802,14 +802,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     // If the config type is for a service, then allow a user with SERVICE_MODIFY_CONFIGS to
     // update, else ensure the user has CLUSTER_MODIFY_CONFIGS
-    String service = null;
+    String service = cluster.getServiceByConfigType(configType);
 
-    try {
-      service = cluster.getServiceForConfigTypes(Collections.singleton(configType));
-    } catch (IllegalArgumentException e) {
-      // Ignore this since we may have hit a config type that spans multiple services. This may
-      // happen in unit test cases but should not happen with later versions of stacks.
-    }
 
     // Get the changes so that the user's intention can be determined. For example, maybe
     // the user wants to change the run-as user for a service or maybe the the cluster-wide
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
index f8feef2..912d9be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
@@ -18,10 +18,12 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -494,6 +496,18 @@ public class Stack {
         "Specified configuration type is not associated with any service: " + config);
   }
 
+  public List<String> getServicesForConfigType(String config) {
+    List<String> serviceNames = new ArrayList<>();
+    for (Map.Entry<String, Map<String, Map<String, ConfigProperty>>> entry : serviceConfigurations.entrySet()) {
+      Map<String, Map<String, ConfigProperty>> typeMap = entry.getValue();
+      String serviceName = entry.getKey();
+      if (typeMap.containsKey(config) && !getExcludedConfigurationTypes(serviceName).contains(config)) {
+        serviceNames.add(serviceName);
+      }
+    }
+    return serviceNames;
+  }
+
   /**
    * Return the dependencies specified for the given component.
    *
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 6813fc0..3b0ee0f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -1123,7 +1123,7 @@ public class ConfigHelper {
 
       if (null != baseConfig) {
         try {
-          String service = cluster.getServiceForConfigTypes(Collections.singleton(type));
+          String service = cluster.getServiceByConfigType(type);
           if (!serviceMapped.containsKey(service)) {
             serviceMapped.put(service, new HashSet<>());
           }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 2266c62..2f3afd6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.state.cluster;
 
+import static java.util.stream.Collectors.toList;
+
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -29,13 +31,13 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.locks.ReadWriteLock;
-import java.util.stream.Collectors;
 
 import javax.annotation.Nullable;
 import javax.persistence.EntityManager;
@@ -1135,7 +1137,7 @@ public class ClusterImpl implements Cluster {
     return clusterDAO.getLatestConfigurationsWithTypes(clusterId, getDesiredStackVersion(), types)
       .stream()
       .map(clusterConfigEntity -> configFactory.createExisting(this, clusterConfigEntity))
-      .collect(Collectors.toList());
+      .collect(toList());
   }
 
   @Override
@@ -1581,37 +1583,35 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public String getServiceForConfigTypes(Collection<String> configTypes) {
-    //debug
-    LOG.info("Looking for service for config types {}", configTypes);
-    String serviceName = null;
-    for (String configType : configTypes) {
-      for (Entry<String, String> entry : serviceConfigTypes.entries()) {
-        if (StringUtils.equals(entry.getValue(), configType)) {
-          if (serviceName != null) {
-            if (entry.getKey()!=null && !StringUtils.equals(serviceName, entry.getKey())) {
-              throw new IllegalArgumentException(String.format("Config type %s belongs to %s service, " +
-                "but also qualified for %s", configType, serviceName, entry.getKey()));
-            }
-          } else {
-            serviceName = entry.getKey();
-          }
-        }
-      }
+    List<String> serviceNames = configTypes.stream()
+      .map(this::getServiceByConfigType)
+      .filter(Objects::nonNull)
+      .collect(toList());
+    boolean allTheSame = new HashSet<>(serviceNames).size() <= 1;
+    if (!allTheSame) {
+      throw new IllegalArgumentException(String.format(
+        "Config types: %s should belong to a single installed service. But they belong to: %s", configTypes, serviceNames));
     }
-    LOG.info("Service {} returning", serviceName);
-    return serviceName;
+    return serviceNames.isEmpty() ? null : serviceNames.get(0);
+  }
+
+  public List<String> serviceNameByConfigType(String configType) {
+    return serviceConfigTypes.entries().stream()
+      .filter(entry -> StringUtils.equals(entry.getValue(), configType))
+      .map(entry -> entry.getKey())
+      .collect(toList());
   }
 
   @Override
   public String getServiceByConfigType(String configType) {
-    for (Entry<String, String> entry : serviceConfigTypes.entries()) {
-      String serviceName = entry.getKey();
-      String type = entry.getValue();
-      if (StringUtils.equals(type, configType)) {
-        return serviceName;
-      }
-    }
-    return null;
+    return serviceNameByConfigType(configType).stream()
+      .filter(this::isServiceInstalled)
+      .findFirst()
+      .orElse(null);
+  }
+
+  private boolean isServiceInstalled(String serviceName) {
+    return services.get(serviceName) != null;
   }
 
   @Override
@@ -1894,28 +1894,7 @@ public class ClusterImpl implements Cluster {
 
   @Transactional
   ServiceConfigVersionResponse applyConfigs(Set<Config> configs, String user, String serviceConfigVersionNote) {
-
-    String serviceName = null;
-    for (Config config : configs) {
-      for (Entry<String, String> entry : serviceConfigTypes.entries()) {
-        if (StringUtils.equals(entry.getValue(), config.getType())) {
-          if (serviceName == null) {
-            serviceName = entry.getKey();
-            break;
-          } else if (!serviceName.equals(entry.getKey())) {
-            String error = String.format("Updating configs for multiple services by a " +
-                "single API request isn't supported. Conflicting services %s and %s for %s",
-                                         serviceName, entry.getKey(), config.getType());
-            IllegalArgumentException exception = new IllegalArgumentException(error);
-            LOG.error(error + ", config version not created for {}", serviceName);
-            throw exception;
-          } else {
-            break;
-          }
-        }
-      }
-    }
-
+    String serviceName = getServiceForConfigTypes(configs.stream().map(Config::getType).collect(toList()));
     // update the selected flag for every config type
     ClusterEntity clusterEntity = getClusterEntity();
     Collection<ClusterConfigEntity> clusterConfigs = clusterEntity.getClusterConfigEntities();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 933afa2..9e7eb17 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -732,7 +732,12 @@ public class AmbariContext {
     // iterate over topo host group configs which were defined in
     for (Map.Entry<String, Map<String, String>> entry : userProvidedGroupProperties.entrySet()) {
       String type = entry.getKey();
-      String service = stack.getServiceForConfigType(type);
+      List<String> services = stack.getServicesForConfigType(type);
+      String service = services.stream()
+        .filter(each -> topology.getBlueprint().getServices().contains(each))
+        .findFirst()
+        .orElseThrow(() -> new IllegalArgumentException("Specified configuration type is not associated with any service: " + type));
+
       Config config = configFactory.createReadOnly(type, groupName, entry.getValue(), null);
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 4b09c6d..8b37f72 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -1203,6 +1203,7 @@ public class ClusterTest {
   @Test
   public void testServiceConfigVersions() throws Exception {
     createDefaultCluster();
+    c1.addService("HDFS", helper.getOrCreateRepositoryVersion(new StackId("HDP", "0.1"), "0.1"));
 
     Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<>());
@@ -1261,6 +1262,7 @@ public class ClusterTest {
   @Test
   public void testSingleServiceVersionForMultipleConfigs() throws Exception {
     createDefaultCluster();
+    c1.addService("HDFS", helper.getOrCreateRepositoryVersion(new StackId("HDP", "0.1"), "0.1"));
 
     Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<>());
@@ -1383,6 +1385,7 @@ public class ClusterTest {
   public void testAllServiceConfigVersionsWithConfigGroups() throws Exception {
     // Given
     createDefaultCluster();
+    c1.addService("HDFS", helper.getOrCreateRepositoryVersion(new StackId("HDP", "0.1"), "0.1"));
 
     Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
         ImmutableMap.of("p1", "v1"), ImmutableMap.of());
@@ -1442,6 +1445,7 @@ public class ClusterTest {
   public void testAllServiceConfigVersionsWithDeletedConfigGroups() throws Exception {
     // Given
     createDefaultCluster();
+    c1.addService("HDFS", helper.getOrCreateRepositoryVersion(new StackId("HDP", "0.1"), "0.1"));
 
     Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
         ImmutableMap.of("p1", "v1"), ImmutableMap.of());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
index 0deeae9..5128c65 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.topology;
 
+import static java.util.Collections.singletonList;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMock;
@@ -215,7 +216,7 @@ public class AmbariContextTest {
     expect(repositoryVersion.getType()).andReturn(RepositoryType.STANDARD).atLeastOnce();
 
     expect(repositoryVersionDAO.findByStack(EasyMock.anyObject(StackId.class))).andReturn(
-        Collections.singletonList(repositoryVersion)).atLeastOnce();
+        singletonList(repositoryVersion)).atLeastOnce();
     replay(repositoryVersionDAO, repositoryVersion);
 
     context.configFactory = configFactory;
@@ -240,7 +241,7 @@ public class AmbariContextTest {
     expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
 
     for (Map.Entry<String, String> entry : configTypeServiceMapping.entrySet()) {
-      expect(stack.getServiceForConfigType(entry.getKey())).andReturn(entry.getValue()).anyTimes();
+      expect(stack.getServicesForConfigType(entry.getKey())).andReturn(singletonList(entry.getValue())).anyTimes();
     }
 
     expect(controller.getClusters()).andReturn(clusters).anyTimes();

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 01/15: AMBARI-22121. Create mpack for Isilon OneFS (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit df9a638c8bc210f9c0a5494d66b17a6e98c3d7a7
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Fri Oct 6 09:13:09 2017 +0200

    AMBARI-22121. Create mpack for Isilon OneFS (amagyar)
---
 .../management-packs/isilon-onefs-mpack/.gitignore |   1 +
 .../management-packs/isilon-onefs-mpack/pom.xml    | 110 ++++++
 .../src/main/assemblies/isilon-onefs-mpack.xml     |  40 +++
 .../addon-services/ONEFS/1.0.0/alerts.json         |  46 +++
 .../ONEFS/1.0.0/configuration/core-site.xml        | 102 ++++++
 .../ONEFS/1.0.0/configuration/hadoop-env.xml       | 386 +++++++++++++++++++++
 .../ONEFS/1.0.0/configuration/hdfs-site.xml        |  61 ++++
 .../addon-services/ONEFS/1.0.0/metainfo.xml        |  90 +++++
 .../addon-services/ONEFS/1.0.0/metrics.json        |  59 ++++
 .../ONEFS/1.0.0/package/scripts/__init__.py        |  20 ++
 .../ONEFS/1.0.0/package/scripts/onefs_client.py    |  56 +++
 .../ONEFS/1.0.0/package/scripts/params.py          |  29 ++
 .../ONEFS/1.0.0/package/scripts/params_linux.py    |  74 ++++
 .../ONEFS/1.0.0/package/scripts/params_windows.py  |  83 +++++
 .../ONEFS/1.0.0/package/scripts/service_check.py   | 137 ++++++++
 .../ONEFS/1.0.0/package/scripts/status_params.py   |  58 ++++
 .../addon-services/ONEFS/1.0.0/widgets.json        |  38 ++
 .../src/main/resources/mpack.json                  |  28 ++
 18 files changed, 1418 insertions(+)

diff --git a/contrib/management-packs/isilon-onefs-mpack/.gitignore b/contrib/management-packs/isilon-onefs-mpack/.gitignore
new file mode 100644
index 0000000..1377554
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/.gitignore
@@ -0,0 +1 @@
+*.swp
diff --git a/contrib/management-packs/isilon-onefs-mpack/pom.xml b/contrib/management-packs/isilon-onefs-mpack/pom.xml
new file mode 100644
index 0000000..5d8f215
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/pom.xml
@@ -0,0 +1,110 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.ambari</groupId>
+  <artifactId>isilon-onefs-mpack</artifactId>
+  <packaging>pom</packaging>
+  <version>0.1.0.0-SNAPSHOT</version>
+  <name>Isilon OneFS Ambari Management Pack</name>
+  <url>http://ambari.apache.org/</url>
+  <properties>
+    <minAmbariVersion>3.0.0.0</minAmbariVersion>
+    <maxAmbariVersion></maxAmbariVersion>
+    <nifiversion>1.0.0</nifiversion>
+  </properties>
+  <parent>
+    <groupId>org.apache.ambari.contrib.mpacks</groupId>
+    <artifactId>ambari-contrib-mpacks</artifactId>
+    <version>2.0.0.0-SNAPSHOT</version>
+  </parent>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
+        <executions>
+          <execution>
+            <id>parse-version</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>parse-version</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>regex-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>mpackVersion</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3.$4</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-resources-plugin</artifactId>
+        <version>2.6</version>
+        <executions>
+          <execution>
+            <id>copy-resources</id>
+            <phase>compile</phase>
+            <goals>
+              <goal>copy-resources</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${basedir}/target/</outputDirectory>
+              <resources>
+                <resource>
+                  <directory>${basedir}/src/main/resources</directory>
+                  <includes>
+                    <include>mpack.json</include>
+                  </includes>
+                  <filtering>true</filtering>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptors>
+            <descriptor>src/main/assemblies/isilon-onefs-mpack.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml
new file mode 100644
index 0000000..232cf50
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id></id>
+  <formats>
+    <format>dir</format>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>true</includeBaseDirectory>
+  <fileSets>
+     <fileSet>
+      <directory>src/main/resources/addon-services</directory>
+      <outputDirectory>addon-services</outputDirectory>
+    </fileSet>
+  </fileSets>
+  <files>
+    <file>
+      <source>target/mpack.json</source>
+    </file>
+  </files>    
+  <dependencySets>
+  </dependencySets>
+</assembly>
+
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
new file mode 100644
index 0000000..5718721
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
@@ -0,0 +1,46 @@
+{
+  "ONEFS":{
+    "service": [
+      {
+        "name": "onefs_namenode_cpu",
+        "label": "OneFS NameNode Host CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SERVER",
+          "class": "org.apache.ambari.server.alerts.JmxServerSideAlert",
+          "uri": {
+            "http": "${hdfs-site/dfs.namenode.http-address}",
+            "https": "${hdfs-site/dfs.namenode.https-address}",
+            "https_property": "${hdfs-site/dfs.http.policy}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0,number,percent}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0,number,percent}",
+              "value": 5
+            },
+            "critical": {
+              "text": "{1} CPU, load {0,number,percent}",
+              "value": 20
+            },
+            "units" : "%",
+            "type": "PERCENT"
+           },
+           "jmx": {
+             "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+             ]
+           }
+          }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
new file mode 100644
index 0000000..7d3acd7
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- file system properties -->
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for HDFS.</description>
+    <final>true</final>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>
+     Enable authorization for different protocols.
+  </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>DEFAULT</value>
+    <description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
+  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+RULE:[2:$1@$0](.@ACME.ORG)s/@.//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
+DEFAULT
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>net.topology.script.file.name</name>
+    <value>/etc/hadoop/conf/topology_script.py</value>
+    <description>
+      Location of topology script used by Hadoop to determine the rack location of nodes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
new file mode 100644
index 0000000..bb671cc
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
@@ -0,0 +1,386 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+    <display-name>Hadoop Log Dir Prefix</display-name>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <display-name>Hadoop PID Dir Prefix</display-name>
+    <description>Hadoop PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_root_logger</name>
+    <value>INFO,RFA</value>
+    <display-name>Hadoop Root Logger</display-name>
+    <description>Hadoop Root Logger</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+    <display-name>Hadoop maximum Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+    <display-name>NameNode Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hdfs-site</type>
+        <name>dfs.datanode.data.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+    <display-name>NameNode new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+    <display-name>NameNode maximum new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+    <display-name>NameNode permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+    <display-name>NameNode maximum permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+    <display-name>DataNode maximum Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <display-name>Proxy User Group</display-name>
+    <value>users</value>
+    <property-type>GROUP</property-type>
+    <description>Proxy user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <display-name>HDFS User</display-name>
+    <value>hdfs</value>
+    <property-type>USER</property-type>
+    <description>User to run HDFS as</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_tmp_dir</name>
+    <value>/tmp</value>
+    <description>HDFS tmp Dir</description>
+    <display-name>HDFS tmp Dir</display-name>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user_nofile_limit</name>
+    <value>128000</value>
+    <description>Max open files limit setting for HDFS user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for HDFS user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_backup_dir</name>
+    <description>Local directory for storing backup copy of NameNode images during upgrade</description>
+    <value>/tmp/upgrades</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hdfs_user_keytab</name>
+    <description>HDFS keytab path</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_principal_name</name>
+    <description>HDFS principal name</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-env template</display-name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+
+# Path to jsvc required by secure datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
+  if [ "$HADOOP_HEAPSIZE" = "" ]; then
+    export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+  fi
+else
+  export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+fi
+
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+
+{% if java_version &lt; 8 %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70  [...]
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFra [...]
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+{% else %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize [...]
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{n [...]
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger [...]
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+  do
+    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+  done
+fi
+
+# Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:{{hadoop_lib_home}}/native/Linux-{{architecture}}-64
+
+{% if is_datanode_max_locked_memory_set %}
+# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. 
+# Makes sense to fix only when runing DN as root 
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  ulimit -l {{datanode_max_locked_memory}}
+fi
+{% endif %}
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
new file mode 100644
index 0000000..cb6544f
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <display-name>WebHDFS enabled</display-name>
+    <description>Whether to enable WebHDFS feature</description>
+    <final>true</final>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:8082</value>
+    <description>
+      The datanode http server address and port.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:8082</value>
+    <description>The name of the default file system.  Either the
+      literal string "local" or a host:port for HDFS.</description>
+    <final>true</final>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:8080</value>
+    <description>The https address where namenode binds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.client-write-packet-size</name>
+    <value>131072</value>
+    <description>Packet size for clients to write</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
new file mode 100644
index 0000000..f20bcf8
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ONEFS</name>
+      <displayName>OneFS</displayName>
+      <serviceType>HCFS</serviceType>
+      <comment>Isilon Systems OneFS</comment>
+      <version>1.0.0</version>
+      <components>
+       <component>
+          <name>ONEFS_CLIENT</name>
+          <displayName>OneFS Client</displayName>
+          <category>CLIENT</category>
+          <componentType>HCFS_CLIENT</componentType>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/onefs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
new file mode 100644
index 0000000..cd705eb
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
@@ -0,0 +1,59 @@
+{
+  "ONEFS_CLIENT": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/onefs/demo/counter": {
+              "metric": "onefs.demo.counter",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "properties": {
+          "url_config_type": "hdfs-site",
+          "url_property_name": "dfs.namenode.http-address"
+        },
+        "metrics": {
+          "default": {
+            "metrics/dfs/namenode/Used": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  }
+}
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/__init__.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/__init__.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
new file mode 100644
index 0000000..dbf1331
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.libraries.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+
+class OneFsClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    self.setup_config(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def setup_config(self, env):
+    import params
+    env.set_params(params)
+    XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+    )
+    XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+    )
+
+if __name__ == "__main__":
+  OneFsClient().execute()
+
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params.py
new file mode 100644
index 0000000..838510c
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+retryAble = default("/commandParams/command_retry_enabled", False)
+script_https_protocol = Script.get_force_https_protocol_name()
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..72e0ae9
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management import *
+import os
+import itertools
+import re
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+
+config = Script.get_config()
+
+hostname = config["hostname"]
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+is_namenode_master = hostname in namenode_host
+
+dfs_type = default("/commandParams/dfs_type", "")
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
+
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..3e712b3
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py
@@ -0,0 +1,83 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+#Used in subsequent imports from params
+from install_params import exclude_packages
+from status_params import *
+
+config = Script.get_config()
+hadoop_conf_dir = None
+hbase_conf_dir = None
+hadoop_home = None
+try:
+  hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
+  hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+  hadoop_home = os.environ["HADOOP_HOME"]
+except:
+  pass
+#directories & files
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+#decomission
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+namenode_id = None
+namenode_rpc = None
+hostname = config["hostname"]
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname.lower() in nn_host.lower():
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hdfs_user = hadoop_user
+
+grep_exe = "findstr"
+
+name_node_params = default("/commandParams/namenode", None)
+
+service_map = {
+  "datanode" : datanode_win_service_name,
+  "journalnode" : journalnode_win_service_name,
+  "namenode" : namenode_win_service_name,
+  "secondarynamenode" : snamenode_win_service_name,
+  "zkfc_slave": zkfc_win_service_name
+}
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
new file mode 100644
index 0000000..3d798a3
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
@@ -0,0 +1,137 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.shell import as_user
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.core.logger import Logger
+
+class HdfsServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HdfsServiceCheckDefault(HdfsServiceCheck):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = functions.get_unique_id_and_date()
+    dir = params.hdfs_tmp_dir
+    tmp_file = format("{dir}/{unique}")
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+    params.HdfsResource(dir,
+                        type="directory",
+                        action="create_on_execute",
+                        mode=0777
+    )
+    params.HdfsResource(tmp_file,
+                        type="file",
+                        action="delete_on_execute",
+    )
+
+    params.HdfsResource(tmp_file,
+                        type="file",
+                        source="/etc/passwd",
+                        action="create_on_execute"
+    )
+    params.HdfsResource(None, action="execute")
+
+    if params.has_journalnode_hosts:
+      if params.security_enabled:
+        for host in params.journalnode_hosts:
+          if params.https_only:
+            uri = format("https://{host}:{journalnode_port}")
+          else:
+            uri = format("http://{host}:{journalnode_port}")
+          response, errmsg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab,
+                                                           params.smokeuser_principal, uri, "jn_service_check",
+                                                           params.kinit_path_local, False, None, params.smoke_user)
+          if not response:
+            Logger.error("Cannot access WEB UI on: {0}. Error : {1}", uri, errmsg)
+            return 1
+      else:
+        journalnode_port = params.journalnode_port
+        checkWebUIFileName = "checkWebUI.py"
+        checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
+        comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+
+        checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only} -o {script_https_protocol}")
+        File(checkWebUIFilePath,
+             content=StaticFile(checkWebUIFileName),
+             mode=0775)
+
+        Execute(checkWebUICmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5,
+                user=params.smoke_user
+        )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = as_user(format(
+          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.hdfs_user)
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HdfsServiceCheckWindows(HdfsServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    unique = functions.get_unique_id_and_date()
+
+    #Hadoop uses POSIX-style paths, separator is always /
+    dir = params.hdfs_tmp_dir
+    tmp_file = dir + '/' + unique
+
+    #commands for execution
+    hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
+    create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir)
+    own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir)
+    test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir)
+    cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file)
+    create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file)
+    test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file)
+
+    hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd"))
+    safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe)
+
+    Execute(safemode_command, logoutput=True, try_sleep=3, tries=20)
+    Execute(create_dir_cmd, user=params.hdfs_user,logoutput=True, ignore_failures=True)
+    Execute(own_dir, user=params.hdfs_user,logoutput=True)
+    Execute(test_dir_exists, user=params.hdfs_user,logoutput=True)
+    Execute(create_file_cmd, user=params.hdfs_user,logoutput=True)
+    Execute(test_cmd, user=params.hdfs_user,logoutput=True)
+    Execute(cleanup_cmd, user=params.hdfs_user,logoutput=True)
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/status_params.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/status_params.py
new file mode 100644
index 0000000..153f9a6
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/status_params.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons import OSCheck
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+if OSCheck.is_windows_family():
+  namenode_win_service_name = "namenode"
+  datanode_win_service_name = "datanode"
+  snamenode_win_service_name = "secondarynamenode"
+  journalnode_win_service_name = "journalnode"
+  zkfc_win_service_name = "zkfc"
+else:
+  hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+  hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+  hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+  datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+  namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+  snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+  journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+  zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+  nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
+  hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
new file mode 100644
index 0000000..23da1d6
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
@@ -0,0 +1,38 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_onefs_layout",
+      "display_name": "Standard ONEFS Dashboard",
+      "section_name": "ONEFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "OneFS metrics demo",
+          "description": "Test widget",
+          "default_section_name": "ONEFS_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "scope" : "SERVICE",
+          "metrics": [
+            {
+              "name": "counter",
+              "metric_path": "metrics/onefs/demo/counter",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLUSTER"
+            }
+          ],
+          "values": [
+            {
+              "name": "demo value",
+              "values" : "${counter}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json
new file mode 100644
index 0000000..f15fcb6
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json
@@ -0,0 +1,28 @@
+{
+  "type" : "full-release",
+  "name" : "onefs-ambari-mpack",
+  "version": "0.1",
+  "description" : "OneFS Ambari Management Pack",
+  "prerequisites": {
+    "min-ambari-version" : "3.0.0.0"
+  },
+  "artifacts": [
+    {
+      "name" : "ONEFS-addon-services",
+      "type" : "stack-addon-service-definitions",
+      "source_dir" : "addon-services",
+      "service_versions_map" : [
+          {
+             "service_name" : "ONEFS",
+             "service_version" : "1.0.0",
+             "applicable_stacks" : [
+                 {
+                      "stack_name" : "HDP", "stack_version" : "2.6"
+                  }
+              ]
+          }
+      ]
+    }
+  ]
+}
+

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 02/15: AMBARI-22115. Alerts for OneFS mpack (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit f1758bc9f931b578bb63b345542551ef6be32427
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Thu Oct 12 15:21:13 2017 +0200

    AMBARI-22115. Alerts for OneFS mpack (amagyar)
---
 ambari-server/docs/configuration/index.md          |   1 +
 .../ambari/server/alerts/JmxServerSideAlert.java   | 104 +++++++++++++++++++++
 .../org/apache/ambari/server/alerts/Threshold.java |  73 +++++++++++++++
 .../ambari/server/configuration/Configuration.java |  11 +++
 .../ambari/server/controller/ControllerModule.java |   1 +
 .../server/controller/jmx/JMXMetricHolder.java     |  27 ++++++
 .../java/org/apache/ambari/server/state/Alert.java |   1 +
 .../apache/ambari/server/state/alert/AlertUri.java |  41 ++++++++
 .../ambari/server/state/alert/MetricSource.java    |   7 ++
 .../ambari/server/state/alert/Reporting.java       |  43 +++++++++
 .../ambari/server/state/alert/ServerSource.java    |  15 +++
 .../state/services/AmbariServerAlertService.java   |   7 +-
 .../apache/ambari/server/alerts/ThresholdTest.java |  90 ++++++++++++++++++
 .../server/controller/jmx/JMXMetricHolderTest.java |  61 ++++++++++++
 .../ambari/server/state/alert/AlertUriTest.java    |  60 ++++++++++++
 15 files changed, 541 insertions(+), 1 deletion(-)

diff --git a/ambari-server/docs/configuration/index.md b/ambari-server/docs/configuration/index.md
index 9dbe9c4..8f1531b 100644
--- a/ambari-server/docs/configuration/index.md
+++ b/ambari-server/docs/configuration/index.md
@@ -57,6 +57,7 @@ The following are the properties which can be used to configure Ambari.
 | alerts.execution.scheduler.threadpool.size.core | The core number of threads used to process incoming alert events. The value should be increased as the size of the cluster increases. |`2` | 
 | alerts.execution.scheduler.threadpool.size.max | The number of threads used to handle alerts received from the Ambari Agents. The value should be increased as the size of the cluster increases. |`2` | 
 | alerts.execution.scheduler.threadpool.worker.size | The number of queued alerts allowed before discarding old alerts which have not been handled. The value should be increased as the size of the cluster increases. |`2000` | 
+| alerts.server.side.scheduler.threadpool.size.core | The core pool size of the executor service that runs server side alerts. |`4` | 
 | alerts.snmp.dispatcher.udp.port | The UDP port to use when binding the SNMP dispatcher on Ambari Server startup. If no port is specified, then a random port will be used. | | 
 | alerts.template.file | The full path to the XML file that describes the different alert templates. | | 
 | ambari.display.url | The URL to use when creating messages which should include the Ambari Server URL.<br/><br/>The following are examples of valid values:<ul><li>`http://ambari.apache.org:8080`</ul> | | 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java
new file mode 100644
index 0000000..a4b86f8
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.alerts;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+
+import java.net.URI;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
+import org.apache.ambari.server.controller.internal.URLStreamProvider;
+import org.apache.ambari.server.controller.jmx.JMXMetricHolder;
+import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.alert.AlertDefinition;
+import org.apache.ambari.server.state.alert.AlertDefinitionFactory;
+import org.apache.ambari.server.state.alert.MetricSource;
+import org.apache.ambari.server.state.alert.Reporting;
+import org.apache.ambari.server.state.alert.ServerSource;
+import org.apache.ambari.server.state.services.MetricsRetrievalService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Inject;
+
+/**
+ * I represent a "SERVER" {@link org.apache.ambari.server.state.alert.SourceType} alert
+ * which can pull JMX metrics from a remote cluster.
+ */
+public class JmxServerSideAlert extends AlertRunnable {
+  private final static Logger LOG = LoggerFactory.getLogger(JmxServerSideAlert.class);
+  @Inject
+  private AlertDefinitionFactory definitionFactory;
+  @Inject
+  private MetricsRetrievalService metricsRetrievalService;
+  @Inject
+  private ConfigHelper configHelper;
+
+  public JmxServerSideAlert(String definitionName) {
+    super(definitionName);
+  }
+
+  @Override
+  List<Alert> execute(Cluster cluster, AlertDefinitionEntity entity) throws AmbariException {
+    AlertDefinition alertDef = definitionFactory.coerce(entity);
+    ServerSource serverSource = (ServerSource) alertDef.getSource();
+    URI jmxUrl = jmxUrl(cluster, serverSource);
+    JMXMetricHolder metricHolder = jmxMetric(serverSource, jmxUrl);
+    return metricHolder == null
+      ? emptyList()
+      : alerts(alertDef, serverSource.getJmxInfo(), metricHolder, serverSource.getReporting());
+  }
+
+  private URI jmxUrl(Cluster cluster, ServerSource serverSource) throws AmbariException {
+    return serverSource.getUri().resolve(config(cluster)).resolve(serverSource.getJmxInfo().getUrlSuffix());
+  }
+
+  private Map<String, Map<String, String>> config(Cluster cluster) throws AmbariException {
+    return configHelper.getEffectiveConfigProperties(cluster, configHelper.getEffectiveDesiredTags(cluster, null));
+  }
+
+  private JMXMetricHolder jmxMetric(ServerSource serverSource, URI jmxUri) {
+    URLStreamProvider streamProvider = new URLStreamProvider(
+      serverSource.getUri().getConnectionTimeoutMsec(),
+      serverSource.getUri().getReadTimeoutMsec(),
+      ComponentSSLConfiguration.instance());
+    metricsRetrievalService.submitRequest(MetricsRetrievalService.MetricSourceType.JMX, streamProvider, jmxUri.toString());
+    return metricsRetrievalService.getCachedJMXMetric(jmxUri.toString());
+  }
+
+  private List<Alert> alerts(AlertDefinition alertDef, MetricSource.JmxInfo jmxInfo, JMXMetricHolder jmxMetricHolder, Reporting reporting) throws AmbariException {
+    List<Object> metrics = jmxMetricHolder.findAll(jmxInfo.getPropertyList());
+    if (metrics.isEmpty()) {
+      return emptyList();
+    }
+    if (metrics.get(0) instanceof Number) {
+      Alert alert = reporting.alert(((Number) metrics.get(0)).doubleValue(), metrics, alertDef);
+      return singletonList(alert);
+    } else {
+      LOG.info("Unsupported metrics value: {} when running alert: {}", metrics.get(0), alertDef);
+      return emptyList();
+    }
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/Threshold.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/Threshold.java
new file mode 100644
index 0000000..ae33eeb
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/Threshold.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.alerts;
+
+import javax.annotation.Nullable;
+
+import org.apache.ambari.server.state.AlertState;
+
+/**
+ * I'm a 3 level threshold where each level corresponds to ok, warn and critical levels.
+ * The levels are either increasing or decreasing numerical sequences.
+ * My main responsibility is to map incoming values to {@link AlertState} based on the threshold levels.
+ */
+public class Threshold {
+  private final Double okValue;
+  private final double warnValue;
+  private final double critValue;
+
+  public Threshold(@Nullable Double okValue, double warnValue, double critValue) {
+    this.okValue = okValue;
+    this.warnValue = warnValue;
+    this.critValue = critValue;
+  }
+
+  public AlertState state(double value) {
+    return directionUp() ? stateWhenDirectionUp(value) : stateWhenDirectionDown(value);
+  }
+
+  private boolean directionUp() {
+    return critValue >= warnValue;
+  }
+
+  private AlertState stateWhenDirectionUp(double value) {
+    if (value >= critValue) {
+      return AlertState.CRITICAL;
+    }
+    if (value >= warnValue) {
+      return AlertState.WARNING;
+    }
+    if (okValue == null || value >= okValue) {
+      return AlertState.OK;
+    }
+    return AlertState.UNKNOWN;
+  }
+
+  private AlertState stateWhenDirectionDown(double value) {
+    if (value <= critValue) {
+      return AlertState.CRITICAL;
+    }
+    if (value <= warnValue) {
+      return AlertState.WARNING;
+    }
+    if (okValue == null || value <= okValue) {
+      return AlertState.OK;
+    }
+    return AlertState.UNKNOWN;
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 7003c98..09d5799 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -2786,6 +2786,13 @@ public class Configuration {
   @Markdown(description = "Whether security password encryption is enabled or not. In case it is we store passwords in their own file(s); otherwise we store passwords in the Ambari credential store.")
   public static final ConfigurationProperty<Boolean> SECURITY_PASSWORD_ENCRYPTON_ENABLED = new ConfigurationProperty<Boolean>("security.passwords.encryption.enabled", false);
 
+  /**
+   * The core pool size of the executor service that runs server side alerts.
+   */
+  @Markdown(description = "The core pool size of the executor service that runs server side alerts.")
+  public static final ConfigurationProperty<Integer> SERVER_SIDE_ALERTS_CORE_POOL_SIZE = new ConfigurationProperty<>(
+          "alerts.server.side.scheduler.threadpool.size.core", 4);
+
 
   private static final Logger LOG = LoggerFactory.getLogger(
     Configuration.class);
@@ -6234,4 +6241,8 @@ public class Configuration {
   public String getAutoGroupCreation() {
     return getProperty(AUTO_GROUP_CREATION);
   }
+
+  public int getAlertServiceCorePoolSize() {
+    return Integer.parseInt(getProperty(SERVER_SIDE_ALERTS_CORE_POOL_SIZE));
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
index ed7513f..2938b26 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
@@ -359,6 +359,7 @@ public class ControllerModule extends AbstractModule {
     // So it's an "additional time", given to stage to finish execution before
     // it is considered as timed out
     bindConstant().annotatedWith(Names.named("actionTimeout")).to(600000L);
+    bindConstant().annotatedWith(Names.named("alertServiceCorePoolSize")).to(configuration.getAlertServiceCorePoolSize());
 
     bindConstant().annotatedWith(Names.named("dbInitNeeded")).to(dbInitNeeded);
     bindConstant().annotatedWith(Names.named("statusCheckInterval")).to(5000L);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
index 81d72fb..f6ae54f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
@@ -18,13 +18,18 @@
 
 package org.apache.ambari.server.controller.jmx;
 
+import static java.util.stream.Collectors.toList;
+
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
 
 /**
  *
  */
 public final class JMXMetricHolder {
+  private static final String NAME_KEY = "name";
 
   private List<Map<String, Object>> beans;
 
@@ -47,4 +52,26 @@ public final class JMXMetricHolder {
     }
     return stringBuilder.toString();
   }
+
+  public List<Object> findAll(List<String> properties) {
+    return properties.stream()
+      .map(this::find)
+      .filter(Optional::isPresent)
+      .map(Optional::get)
+      .collect(toList());
+  }
+
+  public Optional<Object> find(String property) {
+    String propertyName = property.split("/")[0];
+    String propertyValue = property.split("/")[1];
+    return beans.stream()
+      .filter(each -> propertyName.equals(name(each)))
+      .map(each -> each.get(propertyValue))
+      .filter(Objects::nonNull)
+      .findFirst();
+  }
+
+  private String name(Map<String, Object> bean) {
+    return bean.containsKey(NAME_KEY) ? (String) bean.get(NAME_KEY) : null;
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Alert.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Alert.java
index 5d2ecc6..49bd5d2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Alert.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Alert.java
@@ -56,6 +56,7 @@ public class Alert {
     component = componentName;
     this.hostName = hostName;
     state = alertState;
+    timestamp = System.currentTimeMillis();
   }
 
   public Alert() {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertUri.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertUri.java
index 93801d5..b3ef2de 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertUri.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertUri.java
@@ -17,8 +17,13 @@
  */
 package org.apache.ambari.server.state.alert;
 
+import java.net.URI;
+import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
+
 import com.google.gson.annotations.SerializedName;
 
 /**
@@ -87,6 +92,13 @@ public class AlertUri {
   @SerializedName("connection_timeout")
   private float m_connectionTimeout = 5.0f;
 
+
+  /**
+   * An optional read timeout value for connections.
+   */
+  @SerializedName("read_timeout")
+  private float readTimeout = 15.0f;
+
   /**
    * If present, then the component supports HA mode and the properties
    * contained within need to be checked to see if an HA URI is required to be
@@ -117,6 +129,18 @@ public class AlertUri {
     m_httpUri = httpUri;
   }
 
+  public void setHttpsUri(String httpsUri) {
+    this.m_httpsUri = httpsUri;
+  }
+
+  public void setHttpsPropertyValue(String m_httpsPropertyValue) {
+    this.m_httpsPropertyValue = m_httpsPropertyValue;
+  }
+
+  public void setHttpsProperty(String m_httpsProperty) {
+    this.m_httpsProperty = m_httpsProperty;
+  }
+
   /**
    * Gets the default port to use on the host running the alert if none of the
    * http properties are available.
@@ -276,6 +300,23 @@ public class AlertUri {
     }
   }
 
+  public URI resolve(Map<String, Map<String, String>> config) throws AmbariException {
+    VariableReplacementHelper variableReplacer = new VariableReplacementHelper();
+    String httpsProperty = variableReplacer.replaceVariables(m_httpsProperty, config);
+    String httpsPropertyValue = variableReplacer.replaceVariables(m_httpsPropertyValue, config);
+    return httpsProperty == null || !httpsProperty.equals(httpsPropertyValue)
+      ? URI.create(String.format("http://%s", variableReplacer.replaceVariables(m_httpUri, config)))
+      : URI.create(String.format("https://%s", variableReplacer.replaceVariables(m_httpsUri, config)));
+  }
+
+  public int getConnectionTimeoutMsec() {
+    return (int) m_connectionTimeout * 1000;
+  }
+
+  public int getReadTimeoutMsec() {
+    return (int) readTimeout * 1000;
+  }
+
   /**
    * {@inheritDoc}
    */
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java
index 11eee05..d7283fe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java
@@ -129,6 +129,9 @@ public class MetricSource extends Source {
 
     private String value;
 
+    @SerializedName("url_suffix")
+    private String urlSuffix = "/jmx";
+
     public List<String> getPropertyList() {
       return propertyList;
     }
@@ -152,5 +155,9 @@ public class MetricSource extends Source {
       return list1.equals(list2);
 
     }
+
+    public String getUrlSuffix() {
+      return urlSuffix;
+    }
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
index 4aeba45..51d074e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
@@ -17,6 +17,15 @@
  */
 package org.apache.ambari.server.state.alert;
 
+import static org.apache.ambari.server.state.alert.Reporting.ReportingType.PERCENT;
+
+import java.text.MessageFormat;
+import java.util.List;
+
+import org.apache.ambari.server.alerts.Threshold;
+import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.AlertState;
+
 import com.google.gson.annotations.SerializedName;
 
 /**
@@ -200,6 +209,40 @@ public class Reporting {
   }
 
   /**
+   * Map the incoming value to {@link AlertState} and generate an alert with that state.
+   */
+  public Alert alert(double value, List<Object> args, AlertDefinition alertDef) {
+    Alert alert = new Alert(alertDef.getName(), null, alertDef.getServiceName(), alertDef.getComponentName(), null, state(value));
+    alert.setText(MessageFormat.format(message(value), args.toArray()));
+    return alert;
+  }
+
+  private AlertState state(double value) {
+    return getThreshold().state(PERCENT == getType() ? value * 100 : value);
+  }
+
+  private Threshold getThreshold() {
+    return new Threshold(getOk().getValue(), getWarning().getValue(), getCritical().getValue());
+  }
+
+  private String message(double value) {
+    switch (state(value)) {
+      case OK:
+        return getOk().getText();
+      case WARNING:
+        return getWarning().getText();
+      case CRITICAL:
+        return getCritical().getText();
+      case UNKNOWN:
+        return "Unknown";
+      case SKIPPED:
+        return "Skipped";
+      default:
+        throw new IllegalStateException("Invalid alert state: " + state(value));
+    }
+  }
+
+  /**
    * The {@link ReportTemplate} class is used to pair a label and threshhold
    * value.
    * <p/>
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ServerSource.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ServerSource.java
index c58867a..765bdea 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ServerSource.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/ServerSource.java
@@ -28,6 +28,13 @@ public class ServerSource extends ParameterizedSource {
   @SerializedName("class")
   private String m_class;
 
+  @SerializedName("uri")
+  private AlertUri uri = null;
+
+  @SerializedName("jmx")
+  private MetricSource.JmxInfo jmxInfo = null;
+
+
   /**
    * Gets the fully qualified classname specified in the source.
    */
@@ -35,6 +42,14 @@ public class ServerSource extends ParameterizedSource {
     return m_class;
   }
 
+  public MetricSource.JmxInfo getJmxInfo() {
+    return jmxInfo;
+  }
+
+  public AlertUri getUri() {
+    return uri;
+  }
+
   /**
    * {@inheritDoc}
    */
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/AmbariServerAlertService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/AmbariServerAlertService.java
index 305f693..6cce1b2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/AmbariServerAlertService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/AmbariServerAlertService.java
@@ -45,6 +45,7 @@ import com.google.common.util.concurrent.AbstractScheduledService;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 import com.google.inject.Provider;
+import com.google.inject.name.Named;
 
 /**
  * The {@link AmbariServerAlertService} is used to manage the dynamically loaded
@@ -87,7 +88,7 @@ public class AmbariServerAlertService extends AbstractScheduledService {
   /**
    * The executor to use to run all {@link Runnable} alert classes.
    */
-  private final ScheduledExecutorService m_scheduledExecutorService = Executors.newScheduledThreadPool(3);
+  private ScheduledExecutorService m_scheduledExecutorService;
 
   /**
    * A map of all of the definition names to {@link ScheduledFuture}s.
@@ -101,6 +102,10 @@ public class AmbariServerAlertService extends AbstractScheduledService {
   public AmbariServerAlertService() {
   }
 
+  @Inject
+  public void initExecutor(@Named("alertServiceCorePoolSize") int alertServiceCorePoolSize) {
+    this.m_scheduledExecutorService = Executors.newScheduledThreadPool(alertServiceCorePoolSize);
+  }
   /**
    * {@inheritDoc}
    */
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/alerts/ThresholdTest.java b/ambari-server/src/test/java/org/apache/ambari/server/alerts/ThresholdTest.java
new file mode 100644
index 0000000..07bfa03
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/alerts/ThresholdTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.alerts;
+
+import static org.apache.ambari.server.state.AlertState.CRITICAL;
+import static org.apache.ambari.server.state.AlertState.OK;
+import static org.apache.ambari.server.state.AlertState.UNKNOWN;
+import static org.apache.ambari.server.state.AlertState.WARNING;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+import org.apache.ambari.server.state.AlertState;
+import org.junit.Test;
+
+public class ThresholdTest {
+  @Test
+  public void testBetweenOkAndWarnIsOk_dirUp() throws Exception {
+    Threshold threshold = new Threshold(10.0, 20.0, 30.0);
+    assertState(threshold, OK, 10, 15, 19);
+  }
+
+  @Test
+  public void testBetweenWarnAndCritIsWarn_dirUp() throws Exception {
+    Threshold threshold = new Threshold(10.0, 20.0, 30.0);
+    assertState(threshold, WARNING, 20, 25, 29);
+  }
+
+  @Test
+  public void testAboveCritIsCrit_dirUp() throws Exception {
+    Threshold threshold = new Threshold(10.0, 20.0, 30.0);
+    assertState(threshold, CRITICAL, 30, 40, 99999);
+  }
+
+  @Test
+  public void testBelowOkIsUnknown_dirUp() throws Exception {
+    Threshold threshold = new Threshold(10.0, 20, 30);
+    assertState(threshold, UNKNOWN, 9, 2, -99999);
+  }
+
+  @Test
+  public void testBelowCritIsCrit_dirDown() throws Exception {
+    Threshold threshold = new Threshold(40.0, 30.0, 20.0);
+    assertState(threshold, CRITICAL, 20, 15, 2, -99999);
+  }
+
+  @Test
+  public void testBetweenWarnAndCritIsWarn_dirDown() throws Exception {
+    Threshold threshold = new Threshold(40.0, 30.0, 20.0);
+    assertState(threshold, WARNING, 30, 25, 21);
+  }
+
+  @Test
+  public void testBetweenOkAndWarnIsOk_dirDown() throws Exception {
+    Threshold threshold = new Threshold(40.0, 30.0, 20.0);
+    assertState(threshold, OK, 40, 35, 31);
+  }
+
+  @Test
+  public void testAboveOkIsUnknown_dirDown() throws Exception {
+    Threshold threshold = new Threshold(40.0, 30.0, 20.0);
+    assertState(threshold, UNKNOWN, 41, 50, 9999);
+  }
+
+  @Test
+  public void testOkIsOptional() throws Exception {
+    Threshold threshold = new Threshold(null, 20.0, 30.0);
+    assertState(threshold, OK, 10, 15, 19);
+  }
+
+  private void assertState(Threshold threshold, AlertState expectedState, int... values) {
+    for (int value: values) {
+      assertThat(expectedState, is(threshold.state(value)));
+    }
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java
new file mode 100644
index 0000000..61d5bae
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXMetricHolderTest.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.jmx;
+
+import static java.util.Arrays.asList;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.internal.matchers.IsCollectionContaining.hasItems;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Optional;
+
+import org.junit.Before;
+import org.junit.Test;
+
+public class JMXMetricHolderTest {
+  private JMXMetricHolder metrics = new JMXMetricHolder();
+
+  @Before
+  public void setUp() {
+    metrics.setBeans(asList(
+      new HashMap<String, Object>() {{
+        put("name", "bean1");
+        put("value", "val1");
+      }},
+      new HashMap<String, Object>() {{
+        put("name", "bean2");
+        put("value", "val2");
+      }}
+    ));
+  }
+
+  @Test
+  public void testFindSingleBeanByName() throws Exception {
+    assertThat(metrics.find("bean1/value"), is(Optional.of("val1")));
+    assertThat(metrics.find("bean2/value"), is(Optional.of("val2")));
+    assertThat(metrics.find("bean3/notfound"), is(Optional.empty()));
+  }
+
+  @Test
+  public void testFindMultipleBeansByName() throws Exception {
+    List<Object> result = metrics.findAll(asList("bean1/value", "bean2/value", "bean3/notfound"));
+    assertThat(result, hasItems("val1", "val2"));
+  }
+}
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alert/AlertUriTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alert/AlertUriTest.java
new file mode 100644
index 0000000..295e364
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alert/AlertUriTest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.alert;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.AmbariException;
+import org.junit.Test;
+
+public class AlertUriTest {
+  @Test
+  public void testChoosesHttpByDefault() throws Exception {
+    AlertUri uri = new AlertUri();
+    uri.setHttpUri("${config1/http-host}/path");
+    assertThat(resolved(uri), is("http://http-host/path"));
+  }
+
+  @Test
+  public void testChoosesHttpsBasedOnProperties() throws Exception {
+    AlertUri uri = new AlertUri();
+    uri.setHttpUri("${config1/http-host}/path");
+    uri.setHttpsUri("${config1/https-host}/path");
+    uri.setHttpsProperty("${config1/use-http}");
+    uri.setHttpsPropertyValue("YES");
+    assertThat(resolved(uri), is("https://https-host/path"));
+  }
+
+  private Map<String, Map<String, String>> config() {
+    return new HashMap<String, Map<String, String>>() {{
+        put("config1", new HashMap<String, String>() {{
+          put("http-host", "http-host");
+          put("https-host", "https-host");
+          put("use-http", "YES");
+        }});
+      }};
+  }
+
+  private String resolved(AlertUri uri) throws AmbariException {
+    return uri.resolve(config()).toString();
+  }
+}
\ No newline at end of file

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 07/15: AMBARI-22424. Quicklinks for OneFS (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 3c90642d312f1943870ec517d515b9744ddd69ba
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Mon Nov 13 16:00:50 2017 +0100

    AMBARI-22424. Quicklinks for OneFS (amagyar)
---
 .../ambari/server/state/quicklinks/Host.java       | 75 ++++++++++++++++++++++
 .../ambari/server/state/quicklinks/Link.java       | 13 ++++
 .../stack/QuickLinksConfigurationModuleTest.java   |  3 +
 .../resources/child_quicklinks_to_override.json    |  4 ++
 .../app/views/common/quick_view_link_view.js       | 64 ++++++++++++++----
 .../test/views/common/quick_link_view_test.js      | 15 ++++-
 .../addon-services/ONEFS/1.0.0/metainfo.xml        |  7 ++
 .../ONEFS/1.0.0/quicklinks/quicklinks.json         | 31 +++++++++
 8 files changed, 197 insertions(+), 15 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Host.java b/ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Host.java
new file mode 100644
index 0000000..074d4e7
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Host.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.state.quicklinks;
+
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+/**
+ * Quick links may override host names of host components with host names that come from configuration.
+ */
+@JsonSerialize(include= JsonSerialize.Inclusion.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class Host {
+  /**
+   * The property name that has the host name if the protocol is http
+   */
+  @JsonProperty("http_property")
+  private String httpProperty;
+
+  /**
+   * The property name that has the host name if the protocol is https
+   */
+  @JsonProperty("https_property")
+  private String httpsProperty;
+
+  /**
+   * The config type where the overridden host name comes from
+   */
+  @JsonProperty("site")
+  private String site;
+
+  public String getHttpProperty() {
+    return httpProperty;
+  }
+
+  public String getHttpsProperty() {
+    return httpsProperty;
+  }
+
+  public String getSite() {
+    return site;
+  }
+
+  public void mergeWithParent(Host parentHost) {
+    if(null == parentHost) {
+      return;
+    }
+    if(null == httpProperty && null != parentHost.getHttpProperty()) {
+      httpProperty = parentHost.getHttpProperty();
+    }
+    if(null == httpsProperty && null != parentHost.getHttpsProperty()) {
+      httpsProperty = parentHost.getHttpsProperty();
+    }
+    if(null == site && null != parentHost.getSite()) {
+      site = parentHost.getSite();
+    }
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Link.java b/ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Link.java
index 1d2e712..4e8e8a3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Link.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/quicklinks/Link.java
@@ -47,6 +47,9 @@ public class Link{
   @JsonProperty("port")
   private Port port;
 
+  @JsonProperty("host")
+  private Host host;
+
   @JsonProperty("protocol")
   private Protocol protocol;
 
@@ -100,6 +103,10 @@ public class Link{
     return port;
   }
 
+  public Host getHost() {
+    return host;
+  }
+
   public void setPort(Port port) {
     this.port = port;
   }
@@ -159,6 +166,12 @@ public class Link{
       port.mergetWithParent(parentLink.getPort());
     }
 
+    if(null == host){
+      host = parentLink.getHost();
+    } else {
+      host.mergeWithParent(parentLink.getHost());
+    }
+
     if (null == attributes && null != parentLink.attributes) {
       attributes = parentLink.attributes;
     }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java
index aac278d..040d630 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/QuickLinksConfigurationModuleTest.java
@@ -33,6 +33,7 @@ import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.state.quicklinks.Check;
+import org.apache.ambari.server.state.quicklinks.Host;
 import org.apache.ambari.server.state.quicklinks.Link;
 import org.apache.ambari.server.state.quicklinks.Port;
 import org.apache.ambari.server.state.quicklinks.Protocol;
@@ -127,6 +128,8 @@ public class QuickLinksConfigurationModuleTest {
         hasLink = true;
         Port port = link.getPort();
         assertEquals("mapred-site", port.getSite());
+        Host host = link.getHost();
+        assertEquals("core-site", host.getSite());
       }
     }
     assertTrue(hasLink);
diff --git a/ambari-server/src/test/resources/child_quicklinks_to_override.json b/ambari-server/src/test/resources/child_quicklinks_to_override.json
index 4309532..0aaec1f 100644
--- a/ambari-server/src/test/resources/child_quicklinks_to_override.json
+++ b/ambari-server/src/test/resources/child_quicklinks_to_override.json
@@ -79,6 +79,10 @@
           "https_default_port": "8090",
           "regex": "\\w*:(\\d+)",
           "site": "mapred-site"
+        },
+        "host":{
+          "http_property": "fs.defaultFS",
+          "site": "core-site"
         }
       }
     ]
diff --git a/ambari-web/app/views/common/quick_view_link_view.js b/ambari-web/app/views/common/quick_view_link_view.js
index 5888acb..3ccc973 100644
--- a/ambari-web/app/views/common/quick_view_link_view.js
+++ b/ambari-web/app/views/common/quick_view_link_view.js
@@ -206,11 +206,8 @@ App.QuickLinksView = Em.View.extend({
       if (!Em.isEmpty(links)) {
         links.forEach(function (link) {
           if (!link.remove) {
-            var portConfig = Em.get(link, 'port');
-            var portConfigSiteProp = Em.get(portConfig, 'site');
-            if (!sites.contains(portConfigSiteProp)) {
-              sites.push(portConfigSiteProp);
-            }
+            this.addSite(link, 'host', sites);
+            this.addSite(link, 'port', sites);
           }
         }, this);
         this.set('requiredSiteNames', this.get('requiredSiteNames').pushObjects(sites).uniq());
@@ -222,6 +219,16 @@ App.QuickLinksView = Em.View.extend({
     }
   },
 
+  addSite: function(link, linkPropertyName, sites) {
+      var config = Em.get(link, linkPropertyName);
+      if (config) {
+        var siteName = Em.get(config, 'site');
+        if (!sites.contains(siteName)) {
+          sites.push(siteName);
+        }
+      }
+  },
+
   /**
    * call for public host names
    *
@@ -267,8 +274,8 @@ App.QuickLinksView = Em.View.extend({
     });
     // no need to set quicklinks if
     // 1)current service does not have quick links configured
-    // 2)No host component present for the configured quicklinks
-    if(hasQuickLinks && hasHosts) {
+    // 2)No host component present for the configured quicklinks and has no overridden hosts
+    if(hasQuickLinks && (hasHosts || this.hasOverriddenHost())) {
       this.set('showQuickLinks', true);
     } else {
       this.set('showNoLinks', true);
@@ -276,15 +283,20 @@ App.QuickLinksView = Em.View.extend({
 
     var isMultipleComponentsInLinks = componentNames.uniq().length > 1;
 
-    if (hosts.length === 0) {
+    if (hosts.length === 0 && !this.hasOverriddenHost()) {
       this.setEmptyLinks();
-    } else if (hosts.length === 1 || isMultipleComponentsInLinks) {
+    } else if (hosts.length === 1 || isMultipleComponentsInLinks || this.hasOverriddenHost()) {
       this.setSingleHostLinks(hosts, response);
     } else {
       this.setMultipleHostLinks(hosts);
     }
   },
 
+  hasOverriddenHost: function() {
+    var links = Em.get(this.getQuickLinksConfiguration(), 'links');
+    return links && links.some(function (each) { return each.host; });
+  },
+
   /**
    * Get public host name by its host name.
    *
@@ -424,10 +436,8 @@ App.QuickLinksView = Em.View.extend({
 
       var links = Em.get(quickLinksConfig, 'links');
       links.forEach(function (link) {
-        var componentName = link.component_name;
-        var hostNameForComponent = hosts.findProperty('componentName',componentName);
-        if (hostNameForComponent) {
-          var publicHostName = hostNameForComponent.publicHostName;
+        var publicHostName = this.publicHostName(link, hosts, protocol);
+        if (publicHostName) {
           if (link.protocol) {
             protocol = this.setProtocol(configProperties, link.protocol);
           }
@@ -446,6 +456,30 @@ App.QuickLinksView = Em.View.extend({
     }
   },
 
+  publicHostName: function(link, hosts, protocol) {
+    if (link.host) { // if quicklink overrides hostcomponent host name, get host from config
+      var configProperties = this.get('configProperties');
+      var hostProperty = Em.get(link.host, protocol + '_property');
+      var site = configProperties.findProperty('type', Em.get(link.host, 'site'));
+      return site && site.properties ? this.parseHostFromUri(site.properties[hostProperty]) : null;
+    } else {
+      var hostNameForComponent = hosts.findProperty('componentName', link.component_name);
+      return hostNameForComponent ? hostNameForComponent.publicHostName : null;
+    }
+  },
+
+  /**
+   * @param {string} uri
+   */
+  parseHostFromUri: function(uri) {
+    if (uri) {
+      var match = uri.match(/:\/\/([^/:]+)/i);
+      return match != null && match.length == 2 ? match[1] : uri;
+    } else {
+      return null;
+    }
+  },
+
   /**
    * set links that contain multiple hosts
    *
@@ -786,7 +820,9 @@ App.QuickLinksView = Em.View.extend({
    * @method setPort
    */
   setPort: function (portConfigs, protocol, configProperties) {
-
+    if (!portConfigs) {
+      return '';
+    }
     var defaultPort = Em.get(portConfigs, protocol + '_default_port');
     var portProperty = Em.get(portConfigs, protocol + '_property');
     var site = configProperties.findProperty('type', Em.get(portConfigs, 'site'));
diff --git a/ambari-web/test/views/common/quick_link_view_test.js b/ambari-web/test/views/common/quick_link_view_test.js
index 1998a9c..9adbbc8 100644
--- a/ambari-web/test/views/common/quick_link_view_test.js
+++ b/ambari-web/test/views/common/quick_link_view_test.js
@@ -195,6 +195,9 @@ describe('App.QuickViewLinks', function () {
           {
             port: {
               site: "yarn-site"
+            },
+            host: {
+              site: "yarn-env"
             }
           }
         ]
@@ -205,7 +208,7 @@ describe('App.QuickViewLinks', function () {
       quickViewLinks.set('content.serviceName', 'YARN');
       mock.returns(quickLinksConfigYARN);
       quickViewLinks.loadQuickLinksConfigSuccessCallback({items: []});
-      expect(quickViewLinks.get('requiredSiteNames')).to.be.eql(["core-site", "hdfs-site", "admin-properties", "hbase-site", "yarn-site"]);
+      expect(quickViewLinks.get('requiredSiteNames')).to.be.eql(["core-site", "hdfs-site", "admin-properties", "hbase-site", "yarn-site", "yarn-env"]);
     });
   });
 
@@ -247,8 +250,11 @@ describe('App.QuickViewLinks', function () {
   });
 
   describe("#setQuickLinksSuccessCallback()", function () {
+    var getQuickLinks;
     beforeEach(function () {
       this.mock = sinon.stub(quickViewLinks, 'getHosts');
+      getQuickLinks = sinon.stub(quickViewLinks, 'getQuickLinksConfiguration');
+      getQuickLinks.returns({});
       sinon.stub(quickViewLinks, 'setEmptyLinks');
       sinon.stub(quickViewLinks, 'setSingleHostLinks');
       sinon.stub(quickViewLinks, 'setMultipleHostLinks');
@@ -256,6 +262,7 @@ describe('App.QuickViewLinks', function () {
     });
     afterEach(function () {
       this.mock.restore();
+      getQuickLinks.restore();
       quickViewLinks.setEmptyLinks.restore();
       quickViewLinks.setSingleHostLinks.restore();
       quickViewLinks.setMultipleHostLinks.restore();
@@ -265,6 +272,12 @@ describe('App.QuickViewLinks', function () {
       quickViewLinks.setQuickLinksSuccessCallback();
       expect(quickViewLinks.setEmptyLinks.calledOnce).to.be.true;
     });
+    it("has overridden hosts", function () {
+      this.mock.returns([]);
+      getQuickLinks.returns({ links: [{ host: {site: "yarn-env"} }] });
+      quickViewLinks.setQuickLinksSuccessCallback();
+      expect(quickViewLinks.setEmptyLinks.calledOnce).to.be.false;
+    });
     it("quickLinks is not configured", function () {
       this.mock.returns([{}]);
       quickViewLinks.setQuickLinksSuccessCallback();
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
index 82e2022..5ba562e 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
@@ -86,6 +86,13 @@
         <config-type>hadoop-env</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
     </service>
   </services>
 </metainfo>
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/quicklinks/quicklinks.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..0b9656f
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/quicklinks/quicklinks.json
@@ -0,0 +1,31 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol": {
+      "type": "HTTP_ONLY"
+    },
+    "links": [
+      {
+        "component_name" : "ONEFS_CLIENT",
+        "name": "onefs_web_ui",
+        "label": "OneFS Web UI",
+        "url": "%@://%@",
+        "host": {
+          "http_property": "fs.defaultFS",
+          "site": "core-site"
+        }
+      },
+      {
+        "component_name" : "ONEFS_CLIENT",
+        "name": "onefs_hdfs_web_ui",
+        "label": "OneFS HDFS Settings",
+        "url": "%@://%@/OneFS#HDFS/Settings",
+        "host": {
+          "http_property": "fs.defaultFS",
+          "site": "core-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 13/15: AMBARI-22449. Improved service/component dependency support (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit ebf3630e5759d39bdc9e4ea0dd3b97217b5248b2
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Fri Jan 5 08:53:59 2018 +0100

    AMBARI-22449. Improved service/component dependency support (amagyar)
---
 .../controller/StackServiceComponentResponse.java  | 10 ++++++
 .../StackServiceComponentResourceProvider.java     |  9 ++++-
 .../apache/ambari/server/state/ComponentInfo.java  | 15 ++++++++
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml    |  1 +
 .../common-services/HDFS/3.0.0.3.0/metainfo.xml    |  1 +
 ambari-web/app/controllers/main/host/details.js    |  8 ++---
 .../app/controllers/wizard/step8_controller.js     |  8 +++--
 ambari-web/app/mappers/stack_service_mapper.js     |  1 +
 ambari-web/app/models/stack_service_component.js   | 33 +++++++++++++++++
 .../test/controllers/main/host/details_test.js     | 23 +++++++++---
 ambari-web/test/controllers/wizard/step8_test.js   | 41 ++++++++++++----------
 11 files changed, 117 insertions(+), 33 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackServiceComponentResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackServiceComponentResponse.java
index 75fac6e..8797f03 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackServiceComponentResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackServiceComponentResponse.java
@@ -118,6 +118,11 @@ public class StackServiceComponentResponse {
   private String reassignAllowed;
 
   /**
+   * @see ComponentInfo#componentType
+   */
+  private String componentType;
+
+  /**
    * Constructor.
    *
    * @param component
@@ -139,6 +144,7 @@ public class StackServiceComponentResponse {
     bulkCommandMasterComponentName = getBulkCommandsMasterComponentName(component);
     reassignAllowed = component.getReassignAllowed();
     rollingRestartSupported = component.getRollingRestartSupported();
+    componentType = component.getComponentType();
 
     // the custom command names defined for this component
     List<CustomCommandDefinition> definitions = component.getCustomCommands();
@@ -511,6 +517,10 @@ public class StackServiceComponentResponse {
     return bulkCommandMasterComponentName == null ? "":bulkCommandMasterComponentName;
   }
 
+  public String getComponentType() {
+    return componentType;
+  }
+
   /**
    * Interface to help correct Swagger documentation generation
    */
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackServiceComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackServiceComponentResourceProvider.java
index a221248..933dcb7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackServiceComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackServiceComponentResourceProvider.java
@@ -104,6 +104,10 @@ public class StackServiceComponentResourceProvider extends
   private static final String AUTO_DEPLOY_LOCATION_ID = PropertyHelper.getPropertyId(
       "auto_deploy", "location");
 
+  private static final String COMPONENT_TYPE = PropertyHelper.getPropertyId(
+    "StackServiceComponents", "component_type");
+
+
   /**
    * The key property ids for a StackServiceComponent resource.
    */
@@ -137,7 +141,8 @@ public class StackServiceComponentResourceProvider extends
       RECOVERY_ENABLED,
       ROLLING_RESTART_SUPPORTED,
       AUTO_DEPLOY_ENABLED_ID,
-      AUTO_DEPLOY_LOCATION_ID);
+      AUTO_DEPLOY_LOCATION_ID,
+      COMPONENT_TYPE);
 
   protected StackServiceComponentResourceProvider(AmbariManagementController managementController) {
     super(Type.StackServiceComponent, propertyIds, keyPropertyIds, managementController);
@@ -225,6 +230,8 @@ public class StackServiceComponentResourceProvider extends
 
       setResourceProperty(resource, ROLLING_RESTART_SUPPORTED, response.isRollingRestartSupported(),  requestedIds);
 
+      setResourceProperty(resource, COMPONENT_TYPE, response.getComponentType(),  requestedIds);
+
       AutoDeployInfo autoDeployInfo = response.getAutoDeploy();
       if (autoDeployInfo != null) {
         setResourceProperty(resource, AUTO_DEPLOY_ENABLED_ID,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
index d361a29..a4bac56 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
@@ -145,6 +145,12 @@ public class ComponentInfo {
   @XmlElement(name="customFolder")
   private String customFolder;
 
+  /**
+   * Optional component type like HCFS_CLIENT.
+   * HCFS_CLIENT indicates compatibility with HDFS_CLIENT
+   */
+  private String componentType;
+
   public ComponentInfo() {
   }
 
@@ -173,6 +179,7 @@ public class ComponentInfo {
     reassignAllowed = prototype.reassignAllowed;
     customFolder = prototype.customFolder;
     rollingRestartSupported = prototype.rollingRestartSupported;
+    componentType = prototype.componentType;
   }
 
   public String getName() {
@@ -435,6 +442,14 @@ public class ComponentInfo {
     this.customFolder = customFolder;
   }
 
+  public String getComponentType() {
+    return componentType;
+  }
+
+  public void setComponentType(String componentType) {
+    this.componentType = componentType;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
index 6bbb583..71e487c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
@@ -157,6 +157,7 @@
           <name>HDFS_CLIENT</name>
           <displayName>HDFS Client</displayName>
           <category>CLIENT</category>
+          <componentType>HCFS_CLIENT</componentType>
           <cardinality>1+</cardinality>
           <versionAdvertised>true</versionAdvertised>
           <commandScript>
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
index 0c629f3..c1dd4af 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/metainfo.xml
@@ -157,6 +157,7 @@
           <name>HDFS_CLIENT</name>
           <displayName>HDFS Client</displayName>
           <category>CLIENT</category>
+          <componentType>HCFS_CLIENT</componentType>
           <cardinality>1+</cardinality>
           <versionAdvertised>true</versionAdvertised>
           <commandScript>
diff --git a/ambari-web/app/controllers/main/host/details.js b/ambari-web/app/controllers/main/host/details.js
index e52eec0..9e05f2a 100644
--- a/ambari-web/app/controllers/main/host/details.js
+++ b/ambari-web/app/controllers/main/host/details.js
@@ -3005,9 +3005,6 @@ App.MainHostDetailsController = Em.Controller.extend(App.SupportClientConfigsDow
     opt = opt || {};
     opt.scope = opt.scope || '*';
     var installedComponents;
-    var dependencies = App.StackServiceComponent.find(componentName).get('dependencies');
-    dependencies = opt.scope === '*' ? dependencies : dependencies.filterProperty('scope', opt.scope);
-    if (dependencies.length == 0) return [];
     switch (opt.scope) {
       case 'host':
         Em.assert("You should pass at least `hostName` or `installedComponents` to options.", opt.hostName || opt.installedComponents);
@@ -3018,9 +3015,8 @@ App.MainHostDetailsController = Em.Controller.extend(App.SupportClientConfigsDow
         installedComponents = opt.installedComponents || App.HostComponent.find().mapProperty('componentName').uniq();
         break;
     }
-    return dependencies.filter(function (dependency) {
-      return !installedComponents.contains(dependency.componentName);
-    }).mapProperty('componentName');
+    var component = App.StackServiceComponent.find(componentName);
+    return component.missingDependencies(installedComponents, opt);
   },
 
   /**
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index bd3c36f..2d9f4d2 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -1181,13 +1181,15 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wiz
   },
 
   getClientsMap: function (flag) {
-    var clientNames = App.StackServiceComponent.find().filterProperty('isClient').mapProperty('componentName'),
+    var clients = App.StackServiceComponent.find().filterProperty('isClient'),
       clientsMap = {},
       dependedComponents = flag ? App.StackServiceComponent.find().filterProperty(flag) : App.StackServiceComponent.find();
-    clientNames.forEach(function (clientName) {
+    clients.forEach(function (client) {
+      var clientName = client.get('componentName');
       clientsMap[clientName] = Em.A([]);
       dependedComponents.forEach(function (component) {
-        if (component.get('dependencies').mapProperty('componentName').contains(clientName)) clientsMap[clientName].push(component.get('componentName'));
+        if (component.dependsOn(client))
+          clientsMap[clientName].push(component.get('componentName'));
       });
       if (!clientsMap[clientName].length) delete clientsMap[clientName];
     });
diff --git a/ambari-web/app/mappers/stack_service_mapper.js b/ambari-web/app/mappers/stack_service_mapper.js
index 368a182..9b55a05 100644
--- a/ambari-web/app/mappers/stack_service_mapper.js
+++ b/ambari-web/app/mappers/stack_service_mapper.js
@@ -64,6 +64,7 @@ App.stackServiceMapper = App.QuickDataMapper.create({
     rolling_restart_supported: 'rolling_restart_supported',
     is_master: 'is_master',
     is_client: 'is_client',
+    component_type: 'component_type',
     stack_name: 'stack_name',
     stack_version: 'stack_version',
     stack_service_id: 'service_name',
diff --git a/ambari-web/app/models/stack_service_component.js b/ambari-web/app/models/stack_service_component.js
index 27aa8aa..d4e3446 100644
--- a/ambari-web/app/models/stack_service_component.js
+++ b/ambari-web/app/models/stack_service_component.js
@@ -38,6 +38,7 @@ App.StackServiceComponent = DS.Model.extend({
   rollingRestartSupported: DS.attr('boolean'),
   isMaster: DS.attr('boolean'),
   isClient: DS.attr('boolean'),
+  componentType: DS.attr('string'),
   stackName: DS.attr('string'),
   stackVersion: DS.attr('string'),
   stackService: DS.belongsTo('App.StackService'),
@@ -61,6 +62,38 @@ App.StackServiceComponent = DS.Model.extend({
     return numberUtils.getCardinalityValue(this.get('cardinality'), true);
   }.property('cardinality'),
 
+  /**
+   * Check if the given component is compatible with this one. Having the same name or componentType indicates compatibility.
+   **/
+  dependsOn: function(aStackServiceComponent, opt) {
+    return this.get('dependencies').some(function(each) {
+      return aStackServiceComponent.compatibleWith(App.StackServiceComponent.find(each.componentName));
+    });
+  },
+
+  compatibleWith: function(aStackServiceComponent) {
+    return this.get('componentName') === aStackServiceComponent.get('componentName')
+      || (this.get('componentType') && this.get('componentType') === aStackServiceComponent.get('componentType'));
+  },
+
+  /**
+   * Collect dependencies which are required by this component but not installed.
+   * A compatible installed component (e.g.: componentType=HCFS_CLIENT) is not considered as a missing dependency.
+   **/
+  missingDependencies: function(installedComponents, opt) {
+    opt = opt || {};
+    opt.scope = opt.scope || '*';
+    var dependencies = this.get('dependencies');
+    dependencies = opt.scope === '*' ? dependencies : dependencies.filterProperty('scope', opt.scope);
+    if (dependencies.length == 0) return [];
+    installedComponents = installedComponents.map(function(each) { return App.StackServiceComponent.find(each); });
+    return dependencies.filter(function (dependency) {
+      return !installedComponents.some(function(each) {
+        return each.compatibleWith(App.StackServiceComponent.find(dependency.componentName));
+      });
+    }).mapProperty('componentName');
+  },
+
   /** @property {Boolean} isRequired - component required to install **/
   isRequired: Em.computed.gt('minToInstall', 0),
 
diff --git a/ambari-web/test/controllers/main/host/details_test.js b/ambari-web/test/controllers/main/host/details_test.js
index f9df36d..e9490fb 100644
--- a/ambari-web/test/controllers/main/host/details_test.js
+++ b/ambari-web/test/controllers/main/host/details_test.js
@@ -3566,30 +3566,43 @@ describe('App.MainHostDetailsController', function () {
 
     it("no dependencies", function () {
       var opt = {scope: '*'};
-      this.mock.returns(Em.Object.create({
-        dependencies: []
+      this.mock.withArgs('C1').returns(App.StackServiceComponent.createRecord({
+          'dependencies': []
       }));
       expect(controller.checkComponentDependencies('C1', opt)).to.be.empty;
     });
     it("dependecies already installed", function () {
       var opt = {scope: '*', installedComponents: ['C2']};
-      this.mock.returns(Em.Object.create({
+      this.mock.withArgs('C1').returns(App.StackServiceComponent.createRecord({
         dependencies: [{componentName: 'C2'}]
       }));
+      this.mock.withArgs('C2').returns(App.StackServiceComponent.createRecord({ componentName: 'C2' }));
       expect(controller.checkComponentDependencies('C1', opt)).to.be.empty;
     });
     it("dependecies should be added", function () {
       var opt = {scope: '*', installedComponents: ['C2']};
-      this.mock.returns(Em.Object.create({
+      this.mock.withArgs('C1').returns(App.StackServiceComponent.createRecord({
         dependencies: [{componentName: 'C3'}]
       }));
+      this.mock.withArgs('C2').returns(App.StackServiceComponent.createRecord({ componentName: 'C2' }));
+      this.mock.withArgs('C3').returns(App.StackServiceComponent.createRecord({ componentName: 'C3' }));
       expect(controller.checkComponentDependencies('C1', opt)).to.eql(['C3']);
     });
+    it("dependecies already installed by component type", function () {
+      var opt = {scope: '*', installedComponents: ['C3']};
+      this.mock.withArgs('C1').returns(App.StackServiceComponent.createRecord({
+        dependencies: [{componentName: 'C2'}]
+      }));
+      this.mock.withArgs('C2').returns(App.StackServiceComponent.createRecord({ componentName: 'C2', componentType: 'HCFS_CLIENT' }));
+      this.mock.withArgs('C3').returns(App.StackServiceComponent.createRecord({ componentName: 'C3', componentType: 'HCFS_CLIENT' }));
+      expect(controller.checkComponentDependencies('C1', opt)).to.be.empty;
+    });
     it("scope is host", function () {
       var opt = {scope: 'host', hostName: 'host1'};
-      this.mock.returns(Em.Object.create({
+      this.mock.withArgs('C1').returns(App.StackServiceComponent.createRecord({
         dependencies: [{componentName: 'C3', scope: 'host'}]
       }));
+      this.mock.withArgs('C3').returns(App.StackServiceComponent.createRecord({ componentName: 'C3' }));
       expect(controller.checkComponentDependencies('C1', opt)).to.eql(['C3']);
     });
   });
diff --git a/ambari-web/test/controllers/wizard/step8_test.js b/ambari-web/test/controllers/wizard/step8_test.js
index 2a52598..22bcf96 100644
--- a/ambari-web/test/controllers/wizard/step8_test.js
+++ b/ambari-web/test/controllers/wizard/step8_test.js
@@ -2086,8 +2086,9 @@ describe('App.WizardStep8Controller', function () {
     ];
 
     before(function () {
-      sinon.stub(App.StackServiceComponent, 'find').returns([
-        Em.Object.create({
+      var mock = sinon.stub(App.StackServiceComponent, 'find');
+      var components = [
+        App.StackServiceComponent.createRecord({
           componentName: 'c0',
           isMaster: true,
           dependencies: [
@@ -2105,7 +2106,7 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c1',
           isMaster: true,
           dependencies: [
@@ -2123,7 +2124,7 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c2',
           isMaster: true,
           dependencies: [
@@ -2141,14 +2142,14 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c3',
           isMaster: true,
           dependencies: []
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c4',
-          isSlave: true,
+          componentCategory: 'SLAVE',
           dependencies: [
             {
               componentName: 'c1'
@@ -2164,9 +2165,9 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c5',
-          isSlave: true,
+          componentCategory: 'SLAVE',
           dependencies: [
             {
               componentName: 'c4'
@@ -2182,9 +2183,9 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c6',
-          isSlave: true,
+          componentCategory: 'SLAVE',
           dependencies: [
             {
               componentName: 'c1'
@@ -2200,12 +2201,12 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c7',
-          isSlave: true,
+          componentCategory: 'SLAVE',
           dependencies: []
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c8',
           isClient: true,
           dependencies: [
@@ -2223,7 +2224,7 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c9',
           isClient: true,
           dependencies: [
@@ -2241,7 +2242,7 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c10',
           isClient: true,
           dependencies: [
@@ -2259,12 +2260,16 @@ describe('App.WizardStep8Controller', function () {
             }
           ]
         }),
-        Em.Object.create({
+        App.StackServiceComponent.createRecord({
           componentName: 'c11',
           isClient: true,
           dependencies: []
         })
-      ]);
+      ];
+      components.forEach(function(component) {
+        mock.withArgs(component.get('componentName')).returns(component);
+      });
+      mock.returns(components);
     });
 
     after(function () {

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 06/15: AMBARI-22339. Metrics for OneFS mpack (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 0b69fefd9a5fda0f91b916a45e140820e50d1345
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Mon Nov 6 10:48:44 2017 +0100

    AMBARI-22339. Metrics for OneFS mpack (amagyar)
---
 .../internal/OverriddenMetricsHostProvider.java    | 101 +++++++++++++++++++++
 .../internal/StackDefinedPropertyProvider.java     |   9 +-
 .../controller/metrics/MetricHostProvider.java     |   9 ++
 .../timeline/AMSComponentPropertyProvider.java     |   4 +-
 .../apache/ambari/server/state/ConfigHelper.java   |   5 +
 .../server/state/stack/MetricDefinition.java       |   9 +-
 .../OverriddenMetricsHostProviderTest.java         |  95 +++++++++++++++++++
 .../addon-services/ONEFS/1.0.0/metrics.json        |  13 ++-
 .../addon-services/ONEFS/1.0.0/widgets.json        |  65 +++----------
 9 files changed, 254 insertions(+), 56 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProvider.java
new file mode 100644
index 0000000..cb59b2f
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProvider.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import java.net.URI;
+import java.util.Map;
+import java.util.Optional;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.metrics.MetricHostProvider;
+import org.apache.ambari.server.controller.metrics.MetricsServiceProvider;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.kerberos.VariableReplacementHelper;
+
+/**
+ * I'm a special {@link MetricHostProvider} that can override default component host names.
+ */
+public class OverriddenMetricsHostProvider implements MetricHostProvider {
+  private final Map<String, String> overriddenHosts;
+  private final MetricHostProvider metricHostProvider;
+  private final ConfigHelper configHelper;
+  private final VariableReplacementHelper variableReplacer = new VariableReplacementHelper();
+
+  public OverriddenMetricsHostProvider(Map<String, String> overriddenHosts, MetricHostProvider metricHostProvider, ConfigHelper configHelper) {
+    this.overriddenHosts = overriddenHosts;
+    this.metricHostProvider = metricHostProvider;
+    this.configHelper = configHelper;
+  }
+
+  @Override
+  public Optional<String> getExternalHostName(String clusterName, String componentName) {
+    return getOverriddenHost(componentName).map(host -> replaceVariables(clusterName, host));
+  }
+
+  private Optional<String> getOverriddenHost(String componentName) {
+    return Optional.ofNullable(overriddenHosts.get(componentName));
+  }
+
+  private String replaceVariables(String clusterName, String hostName) {
+    try {
+      return hostName(variableReplacer.replaceVariables(hostName, config(clusterName)));
+    } catch (AmbariException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private Map<String, Map<String, String>> config(String clusterName) throws AmbariException {
+    return configHelper.getEffectiveConfigProperties(clusterName, null);
+  }
+
+  private String hostName(String resolvedHost) throws AmbariException {
+    return hasScheme(resolvedHost)
+      ? URI.create(resolvedHost).getHost()
+      : URI.create("any://" + resolvedHost).getHost();
+  }
+
+  private boolean hasScheme(String host) {
+    return host.contains("://");
+  }
+
+  @Override
+  public String getHostName(String clusterName, String componentName) throws SystemException {
+    return metricHostProvider.getHostName(clusterName, componentName);
+  }
+
+  @Override
+  public String getCollectorHostName(String clusterName, MetricsServiceProvider.MetricsService service) throws SystemException {
+    return metricHostProvider.getCollectorHostName(clusterName, service);
+  }
+
+  @Override
+  public String getCollectorPort(String clusterName, MetricsServiceProvider.MetricsService service) throws SystemException {
+    return metricHostProvider.getCollectorPort(clusterName, service);
+  }
+
+  @Override
+  public boolean isCollectorHostLive(String clusterName, MetricsServiceProvider.MetricsService service) throws SystemException {
+    return metricHostProvider.isCollectorHostLive(clusterName, service);
+  }
+
+  @Override
+  public boolean isCollectorComponentLive(String clusterName, MetricsServiceProvider.MetricsService service) throws SystemException {
+    return metricHostProvider.isCollectorComponentLive(clusterName, service);
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
index 8c2198b..861007b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackDefinedPropertyProvider.java
@@ -47,6 +47,7 @@ import org.apache.ambari.server.controller.utilities.StreamProvider;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.Metric;
@@ -155,6 +156,7 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
     Map<String, Map<String, PropertyInfo>> jmxMap = new HashMap<>();
 
     List<PropertyProvider> additional = new ArrayList<>();
+    Map<String, String> overriddenHosts = new HashMap<>();
 
     try {
       for (Resource r : resources) {
@@ -183,6 +185,7 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
         for (MetricDefinition m : defs) {
           if (m.getType().equals("ganglia")) {
             gangliaMap.put(componentName, getPropertyInfo(m));
+            m.getOverriddenHosts().ifPresent(host -> overriddenHosts.put(componentName, host));
           } else if (m.getType().equals("jmx")) {
             jmxMap.put(componentName, getPropertyInfo(m));
           } else {
@@ -207,7 +210,7 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
           MetricsPropertyProvider.createInstance(type, gangliaMap,
             streamProvider, sslConfig,
             cacheProvider,
-            metricHostProvider,
+            metricHostProvider(overriddenHosts),
             metricsServiceProvider, clusterNamePropertyId,
             hostNamePropertyId, componentNamePropertyId);
 
@@ -244,6 +247,10 @@ public class StackDefinedPropertyProvider implements PropertyProvider {
     return resources;
   }
 
+  private MetricHostProvider metricHostProvider(Map<String, String> overriddenHosts) {
+    return new OverriddenMetricsHostProvider(overriddenHosts, metricHostProvider, injector.getInstance(ConfigHelper.class));
+  }
+
   @Override
   public Set<String> checkPropertyIds(Set<String> propertyIds) {
     return Collections.emptySet();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java
index 28e5381..86d7114 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/MetricHostProvider.java
@@ -19,6 +19,8 @@ package org.apache.ambari.server.controller.metrics;
 
 import static org.apache.ambari.server.controller.metrics.MetricsServiceProvider.MetricsService;
 
+import java.util.Optional;
+
 import org.apache.ambari.server.controller.spi.SystemException;
 
 public interface MetricHostProvider {
@@ -76,4 +78,11 @@ public interface MetricHostProvider {
    * @throws SystemException if unable to get the status of metrics server component
    */
   boolean isCollectorComponentLive(String clusterName, MetricsService service) throws SystemException;
+
+  /**
+   * If a component is installed to a host that is managed outside of Ambari (for example OneFS) then this method will return the external hostname.
+   */
+  default Optional<String> getExternalHostName(String clusterName, String componentName) {
+    return Optional.empty();
+  }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSComponentPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSComponentPropertyProvider.java
index 487afde..efc0f4f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSComponentPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/metrics/timeline/AMSComponentPropertyProvider.java
@@ -42,7 +42,9 @@ public class AMSComponentPropertyProvider extends AMSPropertyProvider {
 
   @Override
   protected String getHostName(Resource resource) {
-    return null;
+      return hostProvider
+        .getExternalHostName((String) resource.getPropertyValue(clusterNamePropertyId), (String) resource.getPropertyValue(componentNamePropertyId))
+        .orElse(null);
   }
 
   @Override
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 3b0ee0f..f08ef2f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -298,6 +298,11 @@ public class ConfigHelper {
     return properties;
   }
 
+  public Map<String, Map<String, String>> getEffectiveConfigProperties(String clusterName, String hostName) throws AmbariException {
+    Cluster cluster = clusters.getCluster(clusterName);
+    return getEffectiveConfigProperties(cluster, getEffectiveDesiredTags(cluster, hostName));
+  }
+
   /**
    * Get all config attributes for a cluster given a set of configType to
    * versionTags map. This helper method merges all the override tags with a
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/MetricDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/MetricDefinition.java
index 635ae35..033b18b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/MetricDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/MetricDefinition.java
@@ -22,6 +22,7 @@ import static java.util.Map.Entry;
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -30,6 +31,7 @@ import org.codehaus.jackson.annotate.JsonProperty;
  * Used to represent metrics for a stack component.
  */
 public class MetricDefinition {
+  private static final String OVERRIDDEN_HOST_PROP = "overridden_host";
   private String type = null;
   private Map<String, String> properties = null;
   private Map<String, Map<String, Metric>> metrics = null;
@@ -74,5 +76,10 @@ public class MetricDefinition {
     
     return sb.toString();
   }
-  
+
+  public Optional<String> getOverriddenHosts() {
+    return properties == null
+      ? Optional.empty()
+      : Optional.ofNullable(properties.get(OVERRIDDEN_HOST_PROP));
+  }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProviderTest.java
new file mode 100644
index 0000000..cf4b131
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/OverriddenMetricsHostProviderTest.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import static edu.emory.mathcs.backport.java.util.Collections.emptyMap;
+import static org.easymock.EasyMock.expect;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+import org.apache.ambari.server.controller.metrics.MetricHostProvider;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class OverriddenMetricsHostProviderTest extends EasyMockSupport {
+  private static final String COMPONENT_WITH_OVERRIDDEN_HOST = "component1";
+  private static final String CLUSTER_1 = "cluster1";
+  private static final String COMPONENT_WITHOUT_OVERRIDDEN_HOST = "componentWithoutOverriddenHost";
+  private static final String OVERRIDEN_HOST = "overridenHost1";
+  private static final String COMPONENT_WITH_OVERRIDDEN_HOST_PLACEHOLDER = "${hdfs-site/dfs.namenode.http-address}";
+  private static final String RESOLVED_HOST = "resolved.fqdn";
+  @Rule
+  public EasyMockRule mocks = new EasyMockRule(this);
+  @Mock
+  private MetricHostProvider defaultHostProvider;
+  @Mock
+  private ConfigHelper configHelper;
+  private MetricHostProvider hostProvider;
+
+  @Before
+  public void setUp() throws Exception {
+    hostProvider = new OverriddenMetricsHostProvider(overrideHosts(), defaultHostProvider, configHelper);
+  }
+
+  @Test
+  public void testReturnsDefaultWhenNotOverridden() throws Exception {
+    replayAll();
+    assertThat(hostProvider.getExternalHostName(CLUSTER_1, COMPONENT_WITHOUT_OVERRIDDEN_HOST), is(Optional.empty()));
+    verifyAll();
+  }
+
+  @Test
+  public void testReturnOverriddenHostIfPresent() throws Exception {
+    expect(configHelper.getEffectiveConfigProperties(CLUSTER_1, null)).andReturn(emptyMap()).anyTimes();
+    replayAll();
+    assertThat(hostProvider.getExternalHostName(CLUSTER_1, COMPONENT_WITH_OVERRIDDEN_HOST), is(Optional.of(OVERRIDEN_HOST)));
+    verifyAll();
+  }
+
+  @Test
+  public void testReplacesPlaceholderInOverriddenHost() throws Exception {
+    expect(configHelper.getEffectiveConfigProperties(CLUSTER_1, null)).andReturn(config()).anyTimes();
+    replayAll();
+    assertThat(hostProvider.getExternalHostName(CLUSTER_1, COMPONENT_WITH_OVERRIDDEN_HOST_PLACEHOLDER), is(Optional.of(RESOLVED_HOST)));
+    verifyAll();
+  }
+
+  private Map<String, String> overrideHosts() {
+    return new HashMap<String, String>() {{
+      put(COMPONENT_WITH_OVERRIDDEN_HOST, OVERRIDEN_HOST);
+      put(COMPONENT_WITH_OVERRIDDEN_HOST_PLACEHOLDER, "${hdfs-site/dfs.namenode.http-address}");
+    }};
+  }
+
+  private Map<String, Map<String, String>> config() {
+    return new HashMap<String, Map<String, String>>() {{
+      put("hdfs-site", new HashMap<String, String>() {{
+        put("dfs.namenode.http-address", "http://" + RESOLVED_HOST + ":8080");
+      }});
+    }};
+  }
+}
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
index 335bfba..30a9f5c 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
@@ -3,6 +3,9 @@
     "Component" : [
       {
         "type" : "ganglia",
+        "properties" : {
+          "overridden_host" : "${core-site/fs.defaultFS}"
+        },
         "metrics" : {
           "default" : {
             "metrics/load/load_five" : {
@@ -11,6 +14,12 @@
               "metric" : "load_five",
               "amsHostMetric" : true
             },
+            "metrics/cpu/cpu_num": {
+              "metric": "cpu_num",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric" : true
+            },
             "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep" : {
               "temporal" : true,
               "pointInTime" : true,
@@ -77,7 +86,7 @@
             },
             "metrics/jvm/memHeapUsedM" : {
               "temporal" : true,
-              "pointInTime" : false,
+              "pointInTime" : true,
               "metric" : "jvm.JvmMetrics.MemHeapUsedM",
               "unit" : "MB"
             },
@@ -169,7 +178,7 @@
             },
             "metrics/jvm/memHeapCommittedM" : {
               "temporal" : true,
-              "pointInTime" : false,
+              "pointInTime" : true,
               "metric" : "jvm.JvmMetrics.MemHeapCommittedM"
             },
             "metrics/disk/disk_total" : {
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
index dbde93b..fe2c52e 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
@@ -15,15 +15,13 @@
               "name": "jvm.JvmMetrics.GcCount._rate",
               "metric_path": "metrics/jvm/gcCount._rate",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
               "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             }
           ],
           "values": [
@@ -49,10 +47,9 @@
           "metrics": [
             {
               "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep._rate",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             }
           ],
           "values": [
@@ -77,15 +74,13 @@
               "name": "jvm.JvmMetrics.MemHeapCommittedM",
               "metric_path": "metrics/jvm/memHeapCommittedM",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "jvm.JvmMetrics.MemHeapUsedM",
               "metric_path": "metrics/jvm/memHeapUsedM",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             }
           ],
           "values": [
@@ -114,50 +109,43 @@
               "name": "cpu_system",
               "metric_path": "metrics/cpu/cpu_system",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "cpu_user",
               "metric_path": "metrics/cpu/cpu_user",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "cpu_nice",
               "metric_path": "metrics/cpu/cpu_nice",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "cpu_idle",
               "metric_path": "metrics/cpu/cpu_idle",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "cpu_wio",
               "metric_path": "metrics/cpu/cpu_wio",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "mem_total",
               "metric_path": "metrics/memory/mem_total",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             },
             {
               "name": "mem_free",
               "metric_path": "metrics/memory/mem_free",
               "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+              "component_name": "ONEFS_CLIENT"
             }
           ],
           "values": [
@@ -175,33 +163,8 @@
             "time_range": "1",
             "display_unit": "%"
           }
-        },
-        {
-          "widget_name": "Under Replicated Blocks",
-          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of ONEFS bad health.",
-          "widget_type": "NUMBER",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
-              "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT",
-              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
-            }
-          ],
-          "values": [
-            {
-              "name": "Under Replicated Blocks",
-              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
-            }
-          ],
-          "properties": {
-            "warning_threshold": "0",
-            "error_threshold": "50"
-          }
         }
       ]
     }
   ]
-}
+}
\ No newline at end of file

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 11/15: AMBARI-22634. Kerberos support for OneFS (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit f55b56940a76395ead5af82f601324f810bff533
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Thu Dec 14 12:59:07 2017 +0100

    AMBARI-22634. Kerberos support for OneFS (amagyar)
---
 .../main/admin/kerberos/step1_controller.js        | 105 +++++++++++----------
 ambari-web/app/messages.js                         |   1 +
 .../app/templates/main/admin/kerberos/step1.hbs    |  14 +--
 .../addon-services/ONEFS/1.0.0/kerberos.json       |  73 ++++++++++++++
 .../ONEFS/1.0.0/package/scripts/params_linux.py    |  12 +--
 .../ONEFS/1.0.0/package/scripts/service_check.py   |  80 +---------------
 6 files changed, 138 insertions(+), 147 deletions(-)

diff --git a/ambari-web/app/controllers/main/admin/kerberos/step1_controller.js b/ambari-web/app/controllers/main/admin/kerberos/step1_controller.js
index 9c864a8..690843b 100644
--- a/ambari-web/app/controllers/main/admin/kerberos/step1_controller.js
+++ b/ambari-web/app/controllers/main/admin/kerberos/step1_controller.js
@@ -18,29 +18,44 @@
 
 var App = require('app');
 
+var PreCondition = Ember.Object.extend({
+  displayText: null,
+  checked: false,
+  visibilityCriteria: function() { return true; },
+
+  hidden: function() {
+    return !this.get('visibilityCriteria')();
+  }.property('visibilityCriteria'),
+
+  satisfied: function() {
+    return this.get('checked') || this.get('hidden');
+  }.property('checked', 'hidden')
+});
+
 App.KerberosWizardStep1Controller = Em.Controller.extend({
   name: "kerberosWizardStep1Controller",
 
   selectedItem: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc'),
 
-  isSubmitDisabled: Em.computed.someBy('selectedOption.preConditions', 'checked', false),
+  isSubmitDisabled: Em.computed.someBy('selectedOption.preConditions', 'satisfied', false),
 
   options: Em.A([
     Em.Object.create({
       displayName: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc'),
       value: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc'),
       preConditions: [
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc.condition.1'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc.condition.1')
+        }),
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc.condition.2')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc.condition.2'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc.condition.3')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc.condition.3'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.kdc.condition.4'),
+          visibilityCriteria: function() { return App.Service.find().someProperty('serviceName', 'ONEFS') }
         })
       ]
     }),
@@ -48,25 +63,20 @@ App.KerberosWizardStep1Controller = Em.Controller.extend({
       displayName: Em.I18n.t('admin.kerberos.wizard.step1.option.ad'),
       value: Em.I18n.t('admin.kerberos.wizard.step1.option.ad'),
       preConditions: [
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.1'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.1')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.2'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.2')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.3'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.3')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.4'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.4')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.5'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ad.condition.5')
         })
       ]
     }),
@@ -74,21 +84,17 @@ App.KerberosWizardStep1Controller = Em.Controller.extend({
       displayName: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa'),
       value: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa'),
       preConditions: [
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.1'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.1')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.2'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.2')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.3'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.3')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.4'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.ipa.condition.4')
         })
       ]
     }),
@@ -96,25 +102,20 @@ App.KerberosWizardStep1Controller = Em.Controller.extend({
       displayName: Em.I18n.t('admin.kerberos.wizard.step1.option.manual'),
       value: Em.I18n.t('admin.kerberos.wizard.step1.option.manual'),
       preConditions: [
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.1'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.1')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.2'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.2')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.3'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.3')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.4'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.4')
         }),
-        Em.Object.create({
-          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.5'),
-          checked: false
+        PreCondition.create({
+          displayText: Em.I18n.t('admin.kerberos.wizard.step1.option.manual.condition.5')
         })
       ]
     })
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 4aec83e..e64dccc 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1267,6 +1267,7 @@ Em.I18n.translations = {
   'admin.kerberos.wizard.step1.option.kdc.condition.1': 'Ambari Server and cluster hosts have network access to both the KDC and KDC admin hosts.',
   'admin.kerberos.wizard.step1.option.kdc.condition.2': 'KDC administrative credentials are on-hand.',
   'admin.kerberos.wizard.step1.option.kdc.condition.3': 'The Java Cryptography Extensions (JCE) have been setup on the Ambari Server host and all hosts in the cluster.',
+  'admin.kerberos.wizard.step1.option.kdc.condition.4': 'The Isilon administrator has set up all appropriate principals in OneFS.',
   'admin.kerberos.wizard.step1.option.manual': 'Manage Kerberos principals and keytabs manually',
   'admin.kerberos.wizard.step1.option.manual.condition.1': 'Cluster hosts have network access to the KDC',
   'admin.kerberos.wizard.step1.option.manual.condition.2': 'Kerberos client utilities (such as kinit) have been installed on every cluster host',
diff --git a/ambari-web/app/templates/main/admin/kerberos/step1.hbs b/ambari-web/app/templates/main/admin/kerberos/step1.hbs
index 57d0637..df15daf 100644
--- a/ambari-web/app/templates/main/admin/kerberos/step1.hbs
+++ b/ambari-web/app/templates/main/admin/kerberos/step1.hbs
@@ -44,12 +44,14 @@
         <h5>{{selectedOption.displayName}}:</h5>
         <b>{{t admin.kerberos.wizard.step1.prerequisites.label}}</b> <br/> <br/>
         {{#each condition in selectedOption.preConditions}}
-          {{view App.CheckboxView
-            classNames="checkbox"
-            checkedBinding="condition.checked"
-            labelClassNames="kerberos-step1-prerequiste-checkboxes"
-            labelBinding="condition.displayText"
-          }}
+          {{#unless condition.hidden}}
+            {{view App.CheckboxView
+              classNames="checkbox"
+              checkedBinding="condition.checked"
+              labelClassNames="kerberos-step1-prerequiste-checkboxes"
+              labelBinding="condition.displayText"
+            }}
+          {{/unless}}
         {{/each}}
       </div>
     </div>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/kerberos.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/kerberos.json
new file mode 100644
index 0000000..6078985
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/kerberos.json
@@ -0,0 +1,73 @@
+{
+  "services": [
+    {
+      "name": "ONEFS",
+      "identities": [
+        {
+          "name": "hdfs_spnego",
+          "reference": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
+          },
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
+          }
+        },
+        {
+          "name": "hdfs_smokeuser",
+          "reference": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name":  "ONEFS_CLIENT",
+          "identities": [
+            {
+              "name": "hdfs_hdfs_client_hdfs",
+              "principal": {
+                "value": "${hadoop-env/hdfs_user}${principal_suffix}@${realm}",
+                "type" : "user" ,
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hadoop-env/hdfs_user_keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
index b823fc5..953efdc 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
@@ -32,24 +32,16 @@ config = Script.get_config()
 hostname = config["hostname"]
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
 user_group = config['configurations']['cluster-env']['user_group']
 hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-is_namenode_master = hostname in namenode_host
-
 dfs_type = default("/commandParams/dfs_type", "")
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
index 3d798a3..4c92866 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
@@ -18,11 +18,7 @@ limitations under the License.
 """
 
 from resource_management import *
-from resource_management.core.shell import as_user
 from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.core.logger import Logger
 
 class HdfsServiceCheck(Script):
   pass
@@ -38,7 +34,7 @@ class HdfsServiceCheckDefault(HdfsServiceCheck):
     tmp_file = format("{dir}/{unique}")
 
     if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+      Execute(format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}"),
         user=params.hdfs_user
       )
     params.HdfsResource(dir,
@@ -58,80 +54,6 @@ class HdfsServiceCheckDefault(HdfsServiceCheck):
     )
     params.HdfsResource(None, action="execute")
 
-    if params.has_journalnode_hosts:
-      if params.security_enabled:
-        for host in params.journalnode_hosts:
-          if params.https_only:
-            uri = format("https://{host}:{journalnode_port}")
-          else:
-            uri = format("http://{host}:{journalnode_port}")
-          response, errmsg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab,
-                                                           params.smokeuser_principal, uri, "jn_service_check",
-                                                           params.kinit_path_local, False, None, params.smoke_user)
-          if not response:
-            Logger.error("Cannot access WEB UI on: {0}. Error : {1}", uri, errmsg)
-            return 1
-      else:
-        journalnode_port = params.journalnode_port
-        checkWebUIFileName = "checkWebUI.py"
-        checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
-        comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-
-        checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only} -o {script_https_protocol}")
-        File(checkWebUIFilePath,
-             content=StaticFile(checkWebUIFileName),
-             mode=0775)
-
-        Execute(checkWebUICmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5,
-                user=params.smoke_user
-        )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = as_user(format(
-          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.hdfs_user)
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HdfsServiceCheckWindows(HdfsServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    unique = functions.get_unique_id_and_date()
-
-    #Hadoop uses POSIX-style paths, separator is always /
-    dir = params.hdfs_tmp_dir
-    tmp_file = dir + '/' + unique
-
-    #commands for execution
-    hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
-    create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir)
-    own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir)
-    test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir)
-    cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file)
-    create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file)
-    test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file)
-
-    hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd"))
-    safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe)
-
-    Execute(safemode_command, logoutput=True, try_sleep=3, tries=20)
-    Execute(create_dir_cmd, user=params.hdfs_user,logoutput=True, ignore_failures=True)
-    Execute(own_dir, user=params.hdfs_user,logoutput=True)
-    Execute(test_dir_exists, user=params.hdfs_user,logoutput=True)
-    Execute(create_file_cmd, user=params.hdfs_user,logoutput=True)
-    Execute(test_cmd, user=params.hdfs_user,logoutput=True)
-    Execute(cleanup_cmd, user=params.hdfs_user,logoutput=True)
 
 if __name__ == "__main__":
   HdfsServiceCheck().execute()

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 10/15: AMBARI-22507. Ambari UI changes to support Isilon OneFS (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit cbfcbd6c6981c93942b95da4eac720fe7b7c1e16
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Tue Nov 28 22:05:49 2017 +0100

    AMBARI-22507. Ambari UI changes to support Isilon OneFS (amagyar)
---
 .../app/controllers/wizard/step4_controller.js     | 13 ++++-
 ambari-web/app/mappers/stack_service_mapper.js     |  3 ++
 ambari-web/app/messages.js                         |  2 +
 ambari-web/app/models/stack_service.js             | 17 +++++++
 ambari-web/app/templates/wizard/step4.hbs          | 58 ++++++++++++++++++----
 ambari-web/test/controllers/wizard/step4_test.js   | 29 +++++++++--
 6 files changed, 107 insertions(+), 15 deletions(-)

diff --git a/ambari-web/app/controllers/wizard/step4_controller.js b/ambari-web/app/controllers/wizard/step4_controller.js
index 8a62f3b..9defa12 100644
--- a/ambari-web/app/controllers/wizard/step4_controller.js
+++ b/ambari-web/app/controllers/wizard/step4_controller.js
@@ -34,11 +34,12 @@ App.WizardStep4Controller = Em.ArrayController.extend({
    */
   isAllChecked: function(key, value) {
     if (arguments.length > 1) {
-      this.filterProperty('isDisabled', false).setEach('isSelected', value);
+      this.filterProperty('isDisabled', false).filterProperty('isDFS', false).setEach('isSelected', value);
       return value;
     }
     return this.filterProperty('isInstalled', false).
       filterProperty('isHiddenOnSelectServicePage', false).
+      filterProperty('isDFS', false).
       everyProperty('isSelected', true);
   }.property('@each.isSelected'),
 
@@ -59,6 +60,16 @@ App.WizardStep4Controller = Em.ArrayController.extend({
   errorStack: [],
 
   /**
+   * Services which are HDFS compatible
+   */
+  fileSystems: function() {
+    var fileSystems = this.filterProperty('isDFS', true);
+    return fileSystems.map(function(fs) {
+      return App.FileSystem.create({content: fs, services: fileSystems});
+    });
+  }.property('@each.isDFS'),
+
+  /**
    * Drop errorStack content on selected state changes.
    */
   clearErrors: function() {
diff --git a/ambari-web/app/mappers/stack_service_mapper.js b/ambari-web/app/mappers/stack_service_mapper.js
index f49b4d0..368a182 100644
--- a/ambari-web/app/mappers/stack_service_mapper.js
+++ b/ambari-web/app/mappers/stack_service_mapper.js
@@ -119,6 +119,9 @@ App.stackServiceMapper = App.QuickDataMapper.create({
         stackService.is_installable = false;
         stackService.is_selected = false;
       }
+      if (stackService.service_type === 'HCFS' && stackService.service_name !== 'HDFS') {
+        stackService.is_selected = false;
+      }
       if(stackService.selection === "MANDATORY") {
         stackService.is_mandatory = true;
       }
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index e59f5fd..4aec83e 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -879,6 +879,8 @@ Em.I18n.translations = {
 
   'installer.step4.header':'Choose Services',
   'installer.step4.body':'Choose which services you want to install on your cluster.',
+  'installer.step4.headerFS':'Choose File System',
+  'installer.step4.bodyFS':'Choose which file system you want to install on your cluster.',
   'installer.step4.fsCheck.popup.header':'File System Required',
   'installer.step4.fsCheck.popup.body':'You did not select a File System but one is required. We will automatically add {0}. Is this OK?',
   'installer.step4.multipleDFS.popup.header':'Multiple File Systems Selected',
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index a84816e..1b583c1 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -71,6 +71,23 @@ var MissingDependency = Ember.Object.extend({
   })
 });
 
+App.FileSystem = Ember.ObjectProxy.extend({
+  content: null,
+  services: [],
+
+  isSelected: function(key, aBoolean) {
+    if (arguments.length > 1) {
+      this.clearAllSelection();
+      this.get('content').set('isSelected', aBoolean);
+    }
+    return this.get('content.isSelected');
+  }.property('content.isSelected', 'services.@each.isSelected'),
+
+  clearAllSelection: function() {
+    this.get('services').setEach('isSelected', false);
+  }
+});
+
 /**
  * This model loads all services supported by the stack
  * The model maps to the  http://hostname:8080/api/v1/stacks/HDP/versions/${versionNumber}/services?fields=StackServices/*,serviceComponents/*
diff --git a/ambari-web/app/templates/wizard/step4.hbs b/ambari-web/app/templates/wizard/step4.hbs
index 5a08250..b738f69 100644
--- a/ambari-web/app/templates/wizard/step4.hbs
+++ b/ambari-web/app/templates/wizard/step4.hbs
@@ -17,9 +17,8 @@
 }}
 
 <div id="step4" class="wizard-content col-md-9">
-  <h4 class="step-title">{{t installer.step4.header}}</h4>
-  <p class="step-description">{{t installer.step4.body}}</p>
-
+  <h4 class="step-title">{{t installer.step4.headerFS}}</h4>
+  <p class="step-description">{{t installer.step4.bodyFS}}</p>
   <div class="panel panel-default">
     <div class="panel-body">
       <table class="table table-hover">
@@ -28,23 +27,20 @@
           <th {{QAAttr "service-name-header"}} class="col-md-3">{{t common.service}}</th>
           <th {{QAAttr "service-version-header"}} id="service-version">{{t common.version}}</th>
           <th {{QAAttr "service-description-header"}} id="service-description">{{t common.description}}</th>
-          <th>
-            {{view App.CheckboxView data-qa="toggle-all" disabledBinding="isInstalled" checkedBinding="isAllChecked"}}
-          </th>
         </tr>
         </thead>
         <tbody>
-        {{#each service in controller}}
-          {{#unless isHiddenOnSelectServicePage}}
-            <tr {{QAAttr "service-row"}} {{bindAttr class="service.isSelected:active service.isSelected:service-selected"}} {{action toggleCheckBox service target="view"}}>
+        {{#each service in controller.fileSystems}}
+          {{#unless service.isHiddenOnSelectServicePage}}
+            <tr {{QAAttr "service-row"}} {{bindAttr class="service.isSelected:active service.isSelected:service-selected"}} {{action toggleCheckBox service target="view"}} >
               <td {{QAAttr "service-name"}}>{{service.displayNameOnSelectServicePage}}</td>
               <td {{QAAttr "service-version"}}>{{service.serviceVersionDisplay}}</td>
               <td {{QAAttr "service-description"}}>{{{service.comments}}}</td>
               <td>
                 <div class="checkbox">
                   {{view App.CheckboxView checkboxClassNamesBinding="service.serviceName" data-qa="toggle-service"
-                  disabledBinding="service.isDisabled"
-                  checkedBinding="service.isSelected"
+                         disabledBinding="service.isDisabled"
+                         checkedBinding="service.isSelected"
                   }}
                 </div>
               </td>
@@ -55,6 +51,46 @@
       </table>
     </div>
   </div>
+
+  <h4 class="step-title">{{t installer.step4.header}}</h4>
+  <p class="step-description">{{t installer.step4.body}}</p>
+
+  <div class="panel panel-default">
+    <div class="panel-body">
+      <table class="table table-hover">
+        <thead>
+        <tr>
+          <th {{QAAttr "service-name-header"}} class="col-md-3">{{t common.service}}</th>
+          <th {{QAAttr "service-version-header"}} id="service-version">{{t common.version}}</th>
+          <th {{QAAttr "service-description-header"}} id="service-description">{{t common.description}}</th>
+          <th>
+            {{view App.CheckboxView data-qa="toggle-all" disabledBinding="isInstalled" checkedBinding="isAllChecked"}}
+          </th>
+        </tr>
+        </thead>
+        <tbody>
+        {{#each service in controller}}
+          {{#unless service.isHiddenOnSelectServicePage}}
+            {{#unless service.isDFS}}
+              <tr {{QAAttr "service-row"}} {{bindAttr class="service.isSelected:active service.isSelected:service-selected"}} {{action toggleCheckBox service target="view"}}>
+                <td {{QAAttr "service-name"}}>{{service.displayNameOnSelectServicePage}}</td>
+                <td {{QAAttr "service-version"}}>{{service.serviceVersionDisplay}}</td>
+                <td {{QAAttr "service-description"}}>{{{service.comments}}}</td>
+                <td>
+                  <div class="checkbox">
+                    {{view App.CheckboxView checkboxClassNamesBinding="service.serviceName" data-qa="toggle-service"
+                        disabledBinding="service.isDisabled"
+                        checkedBinding="service.isSelected" }}
+                  </div>
+                </td>
+              </tr>
+            {{/unless}}
+          {{/unless}}
+        {{/each}}
+        </tbody>
+      </table>
+    </div>
+  </div>
 </div>
 
 
diff --git a/ambari-web/test/controllers/wizard/step4_test.js b/ambari-web/test/controllers/wizard/step4_test.js
index 1cd4863..e9942e0 100644
--- a/ambari-web/test/controllers/wizard/step4_test.js
+++ b/ambari-web/test/controllers/wizard/step4_test.js
@@ -78,18 +78,41 @@ describe('App.WizardStep4Controller', function () {
   });
 
   describe('#isAllChecked', function () {
-    it('should return true if all services are selected', function () {
+    it('should return true if all non DFS services are selected', function () {
       controller.setEach('isInstalled', false);
-      controller.findProperty('serviceName', 'HDFS').set('isSelected', true);
+      controller.findProperty('serviceName', 'YARN').set('isSelected', true);
+      controller.findProperty('serviceName', 'HDFS').set('isSelected', false);
       expect(controller.get('isAllChecked')).to.equal(true);
     });
 
     it('should return false if at least one service is not selected', function () {
-      controller.findProperty('serviceName', 'HDFS').set('isSelected', false);
+      controller.findProperty('serviceName', 'YARN').set('isSelected', false);
       expect(controller.get('isAllChecked')).to.equal(false);
     });
   });
 
+  describe('#fileSystems', function () {
+    beforeEach(function () {
+      controller.clear();
+      controller.set('content', generateSelectedServicesContent(['HDFS', 'GLUSTERFS', 'YARN']));
+    });
+
+    it('returns only DFS services', function () {
+      expect(controller.get('fileSystems')).to.have.length(2);
+      expect(controller.get('fileSystems').mapProperty('serviceName')).to.contain('GLUSTERFS');
+      expect(controller.get('fileSystems').mapProperty('serviceName')).to.contain('HDFS');
+    });
+
+    it('allows selecting only one DFS at a time', function () {
+      var fileSystems = controller.get('fileSystems');
+      fileSystems[0].set('isSelected', true);
+      expect(fileSystems[0].get('isSelected')).to.equal(true);
+      expect(fileSystems[1].get('isSelected')).to.equal(false);
+      fileSystems[1].set('isSelected', true);
+      expect(fileSystems[0].get('isSelected')).to.equal(false);
+      expect(fileSystems[1].get('isSelected')).to.equal(true);
+    });
+  });
   describe('#multipleDFSs()', function () {
     it('should return true if HDFS is selected and GLUSTERFS is selected', function () {
       controller.set('content', generateSelectedServicesContent(['HDFS', 'GLUSTERFS']));

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 12/15: AMBARI-22674. ServiceAdvisor for OneFS (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 89f3bba6b100288f736a73096ea3578153b20b1c
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Fri Dec 22 09:54:10 2017 +0100

    AMBARI-22674. ServiceAdvisor for OneFS (amagyar)
---
 .../main/python/ambari_agent/alerts/ams_alert.py   |   4 +-
 .../main/python/ambari_agent/alerts/base_alert.py  |  47 +-------
 .../python/ambari_agent/alerts/metric_alert.py     |   3 +-
 .../main/python/ambari_agent/alerts/port_alert.py  |   4 +-
 .../main/python/ambari_agent/alerts/web_alert.py   |   6 +-
 .../src/main/python/ambari_commons/inet_utils.py   |  46 +++++++
 .../management-packs/isilon-onefs-mpack/pom.xml    |  23 ++++
 .../ONEFS/1.0.0/configuration/core-site.xml        |  14 +--
 .../ONEFS/1.0.0/configuration/hdfs-site.xml        |  24 ++--
 .../ONEFS/1.0.0/configuration/onefs.xml            |  30 +++++
 .../addon-services/ONEFS/1.0.0/metainfo.xml        |  13 ++
 .../ONEFS/1.0.0/package/scripts/params_linux.py    |  12 +-
 .../addon-services/ONEFS/1.0.0/service_advisor.py  | 122 +++++++++++++++++++
 .../addon-services/ONEFS/1.0.0/themes/theme.json   |  60 ++++++++++
 .../src/test/python/TestServiceAdvisor.py          |  67 +++++++++++
 .../src/test/python/unitTests.py                   | 132 +++++++++++++++++++++
 16 files changed, 529 insertions(+), 78 deletions(-)

diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/ams_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/ams_alert.py
index 29b885b..cc2884e 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/ams_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/ams_alert.py
@@ -22,7 +22,6 @@ import httplib
 import imp
 import time
 import urllib
-from alerts.base_alert import BaseAlert
 from alerts.metric_alert import MetricAlert
 import ambari_simplejson as json
 import logging
@@ -30,6 +29,7 @@ import re
 import uuid
 
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from ambari_commons import inet_utils
 
 logger = logging.getLogger()
 
@@ -68,7 +68,7 @@ class AmsAlert(MetricAlert):
       logger.debug("[Alert][{0}] Calculated metric URI to be {1} (ssl={2})".format(
         self.get_name(), alert_uri.uri, str(alert_uri.is_ssl_enabled)))
 
-    host = BaseAlert.get_host_from_url(alert_uri.uri)
+    host = inet_utils.get_host_from_url(alert_uri.uri)
     if host is None:
       host = self.host_name
 
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
index 5c0305e..cdbe316 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
@@ -510,49 +510,4 @@ class BaseAlert(object):
     :param state: the state of the alert in uppercase (such as OK, WARNING, etc)
     :return:  the parameterized text
     '''
-    return '{0}'
-
-  """
-  See RFC3986, Appendix B
-  Tested on the following cases:
-    "192.168.54.1"
-    "192.168.54.2:7661
-    "hdfs://192.168.54.3/foo/bar"
-    "ftp://192.168.54.4:7842/foo/bar"
-
-    Returns None if only a port is passed in
-  """
-  @staticmethod
-  def get_host_from_url(uri):
-    if uri is None:
-      return None
-    
-    # if not a string, return None
-    if not isinstance(uri, basestring):
-      return None    
-        
-    # RFC3986, Appendix B
-    parts = re.findall('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?', uri)
-
-    # index of parts
-    # scheme    = 1
-    # authority = 3
-    # path      = 4
-    # query     = 6
-    # fragment  = 8
-
-    host_and_port = uri
-    if 0 == len(parts[0][1]):
-      host_and_port = parts[0][4]
-    elif 0 == len(parts[0][2]):
-      host_and_port = parts[0][1]
-    elif parts[0][2].startswith("//"):
-      host_and_port = parts[0][3]
-
-    if -1 == host_and_port.find(':'):
-      if host_and_port.isdigit():
-        return None    
-      
-      return host_and_port
-    else:
-      return host_and_port.split(':')[0]
+    return '{0}'
\ No newline at end of file
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py
index 66a1d05..64a1a29 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py
@@ -30,6 +30,7 @@ from alerts.base_alert import BaseAlert
 from ambari_commons.urllib_handlers import RefreshHeaderProcessor
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from ambari_commons import inet_utils
 from ambari_agent import Constants
 
 logger = logging.getLogger()
@@ -80,7 +81,7 @@ class MetricAlert(BaseAlert):
     logger.debug("[Alert][{0}] Calculated metric URI to be {1} (ssl={2})".format(
         self.get_name(), alert_uri.uri, str(alert_uri.is_ssl_enabled)))
 
-    host = BaseAlert.get_host_from_url(alert_uri.uri)
+    host = inet_utils.get_host_from_url(alert_uri.uri)
     if host is None:
       host = self.host_name
 
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
index 02cc91c..1e2e581 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
@@ -24,7 +24,7 @@ import time
 from alerts.base_alert import BaseAlert
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
 from ambari_commons import OSCheck
-from ambari_commons.inet_utils import resolve_address
+from ambari_commons.inet_utils import resolve_address, get_host_from_url
 logger = logging.getLogger()
 
 # default timeouts
@@ -111,7 +111,7 @@ class PortAlert(BaseAlert):
           break
 
 
-    host = BaseAlert.get_host_from_url(uri_value)
+    host = get_host_from_url(uri_value)
     if host is None or host == "localhost" or host == "0.0.0.0":
       host = self.host_name
       host_not_specified = True
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
index 0e400f7..6e01b89 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
@@ -21,9 +21,7 @@ limitations under the License.
 import logging
 import time
 import urllib2
-import ssl
 
-from functools import wraps
 from urllib2 import HTTPError
 
 from tempfile import gettempdir
@@ -33,7 +31,7 @@ from resource_management.libraries.functions.get_port_from_url import get_port_f
 from resource_management.libraries.functions.get_path_from_url import get_path_from_url
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
 from ambari_commons import OSCheck
-from ambari_commons.inet_utils import resolve_address, ensure_ssl_using_protocol
+from ambari_commons.inet_utils import resolve_address, ensure_ssl_using_protocol, get_host_from_url
 from ambari_agent import Constants
 from ambari_agent.AmbariConfig import AmbariConfig
 
@@ -135,7 +133,7 @@ class WebAlert(BaseAlert):
       uri_path = get_path_from_url(string_uri)
 
     # start building the URL manually
-    host = BaseAlert.get_host_from_url(alert_uri.uri)
+    host = get_host_from_url(alert_uri.uri)
     if host is None:
       host = self.host_name
 
diff --git a/ambari-common/src/main/python/ambari_commons/inet_utils.py b/ambari-common/src/main/python/ambari_commons/inet_utils.py
index 5fd3e88..a226235 100644
--- a/ambari-common/src/main/python/ambari_commons/inet_utils.py
+++ b/ambari-common/src/main/python/ambari_commons/inet_utils.py
@@ -23,6 +23,7 @@ import time
 import sys
 import urllib2
 import socket
+import re
 from ambari_commons import OSCheck
 from functools import wraps
 
@@ -266,3 +267,48 @@ def ensure_ssl_using_protocol(protocol="PROTOCOL_TLSv1", ca_certs=None):
         return context
       _create_default_https_context_patched._ambari_patched = True
       ssl._create_default_https_context = _create_default_https_context_patched
+
+"""
+See RFC3986, Appendix B
+Tested on the following cases:
+  "192.168.54.1"
+  "192.168.54.2:7661
+  "hdfs://192.168.54.3/foo/bar"
+  "ftp://192.168.54.4:7842/foo/bar"
+
+  Returns None if only a port is passed in
+"""
+def get_host_from_url(uri):
+  if uri is None:
+    return None
+
+  # if not a string, return None
+  if not isinstance(uri, basestring):
+    return None
+
+    # RFC3986, Appendix B
+  parts = re.findall('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?', uri)
+
+  # index of parts
+  # scheme    = 1
+  # authority = 3
+  # path      = 4
+  # query     = 6
+  # fragment  = 8
+
+  host_and_port = uri
+  if 0 == len(parts[0][1]):
+    host_and_port = parts[0][4]
+  elif 0 == len(parts[0][2]):
+    host_and_port = parts[0][1]
+  elif parts[0][2].startswith("//"):
+    host_and_port = parts[0][3]
+
+  if -1 == host_and_port.find(':'):
+    if host_and_port.isdigit():
+      return None
+
+    return host_and_port
+  else:
+    return host_and_port.split(':')[0]
+
diff --git a/contrib/management-packs/isilon-onefs-mpack/pom.xml b/contrib/management-packs/isilon-onefs-mpack/pom.xml
index 5d8f215..6d5411c 100644
--- a/contrib/management-packs/isilon-onefs-mpack/pom.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/pom.xml
@@ -27,6 +27,7 @@
     <minAmbariVersion>3.0.0.0</minAmbariVersion>
     <maxAmbariVersion></maxAmbariVersion>
     <nifiversion>1.0.0</nifiversion>
+    <custom.tests>false</custom.tests>
   </properties>
   <parent>
     <groupId>org.apache.ambari.contrib.mpacks</groupId>
@@ -88,6 +89,28 @@
         </executions>
       </plugin>
       <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2.1</version>
+        <executions>
+          <execution>
+            <configuration>
+              <executable>python</executable>
+              <workingDirectory>src/test/python</workingDirectory>
+              <arguments>
+                <argument>unitTests.py</argument>
+                <argument>${custom.tests}</argument>
+              </arguments>
+            </configuration>
+            <id>python-test</id>
+            <phase>test</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <artifactId>maven-assembly-plugin</artifactId>
         <configuration>
           <tarLongFileMode>gnu</tarLongFileMode>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
index 7d3acd7..e2a7922 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
@@ -27,6 +27,12 @@
   literal string "local" or a host:port for HDFS.</description>
     <final>true</final>
     <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+          <type>onefs</type>
+          <name>onefs_host</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>hadoop.security.authentication</name>
@@ -91,12 +97,4 @@ DEFAULT
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
-  <property>
-    <name>net.topology.script.file.name</name>
-    <value>/etc/hadoop/conf/topology_script.py</value>
-    <description>
-      Location of topology script used by Hadoop to determine the rack location of nodes.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
 </configuration>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
index cb6544f..727f450 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
@@ -31,26 +31,30 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:8082</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>dfs.namenode.http-address</name>
     <value>localhost:8082</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for HDFS.</description>
     <final>true</final>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>onefs</type>
+        <name>onefs_host</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>dfs.namenode.https-address</name>
     <value>localhost:8080</value>
     <description>The https address where namenode binds</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>onefs</type>
+        <name>onefs_host</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>dfs.client-write-packet-size</name>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/onefs.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/onefs.xml
new file mode 100644
index 0000000..bcbf2c8
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/onefs.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>onefs_host</name>
+    <value/>
+    <description>SmartConnect Zone Address of OneFS</description>
+    <display-name>OneFS Host</display-name>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
index 5ba562e..06a90d9 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
@@ -41,6 +41,11 @@
           <configFiles>
             <configFile>
               <type>xml</type>
+              <fileName>onefs.xml</fileName>
+              <dictionaryName>onefs</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
               <fileName>hdfs-site.xml</fileName>
               <dictionaryName>hdfs-site</dictionaryName>
             </configFile>
@@ -80,9 +85,17 @@
         <timeout>300</timeout>
       </commandScript>
 
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
       <configuration-dependencies>
         <config-type>core-site</config-type>
         <config-type>hdfs-site</config-type>
+        <config-type>onefs</config-type>
         <config-type>hadoop-env</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
index 953efdc..79aff8f 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
@@ -17,15 +17,14 @@ limitations under the License.
 
 """
 
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management import *
-import os
-import itertools
-import re
+from resource_management import Script
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
 
 config = Script.get_config()
 
@@ -45,6 +44,9 @@ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
+java64_home = config['hostLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/service_advisor.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/service_advisor.py
new file mode 100644
index 0000000..630ff62
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/service_advisor.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import os
+from ambari_commons import inet_utils
+import imp
+import traceback
+
+def error(message): return {"level": "ERROR", "message": message}
+
+class Uri:
+  @classmethod
+  def default_fs(self, configs):
+    return self.from_config(configs, 'core-site', 'fs.defaultFS')
+
+  @classmethod
+  def http_namenode(self, configs):
+    return self.from_config(configs, 'hdfs-site', 'dfs.namenode.http-address')
+
+  @classmethod
+  def https_namenode(self, configs):
+    return self.from_config(configs, 'hdfs-site', 'dfs.namenode.https-address')
+
+  @classmethod
+  def onefs(self, configs):
+    return self.from_config(configs, 'onefs', 'onefs_host')
+
+  @staticmethod
+  def from_config(configs, config_type, property_name):
+    return Uri(configs['configurations'][config_type]['properties'][property_name])
+
+  def __init__(self, address):
+    self.address = address
+
+  def has_host(self, uri):
+    return uri.hostname() == self.hostname()
+
+  def hostname(self):
+    return inet_utils.get_host_from_url(self.address)
+
+  def fix_host(self, uri):
+    if not uri.hostname() or not self.hostname():
+      return self.address
+    return self.address.replace(self.hostname(), uri.hostname())
+
+  def __str__(self):
+    return self.address
+
+class CoreSite:
+  def __init__(self, configs):
+    self.configs = configs
+
+  def validate(self):
+    invalid_configs = []
+    onefs_host = Uri.onefs(self.configs)
+    if not Uri.default_fs(self.configs).has_host(onefs_host):
+      invalid_configs.append({
+        'config-name': 'fs.defaultFS',
+        'item': error('Hostname should match OneFS host: {0}'.format(onefs_host))
+      })
+    return invalid_configs
+
+class HdfsSite:
+  def __init__(self, configs):
+    self.configs = configs
+
+  def validate(self):
+    invalid_configs = []
+    onefs_host = Uri.onefs(self.configs)
+    if not Uri.http_namenode(self.configs).has_host(onefs_host):
+      invalid_configs.append({
+        'config-name': 'dfs.namenode.http-address',
+        'item': error('Hostname should match OneFS host: {0}'.format(onefs_host))
+      })
+    if not Uri.https_namenode(self.configs).has_host(onefs_host):
+      invalid_configs.append({
+        'config-name': 'dfs.namenode.https-address',
+        'item': error('Hostname should match OneFS host: {0}'.format(onefs_host))
+      })
+    return invalid_configs
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+else:
+  class HDP26ONEFSServiceAdvisor(service_advisor.ServiceAdvisor):
+    def getServiceConfigurationRecommendations(self, configs, clusterData, services, hosts):
+      putCoreSiteProperty = self.putProperty(configs, "core-site", services)
+      putHdfsSiteProperty = self.putProperty(configs, "hdfs-site", services)
+      onefs_host = Uri.onefs(services)
+      putCoreSiteProperty("fs.defaultFS", Uri.default_fs(services).fix_host(onefs_host))
+      putHdfsSiteProperty("dfs.namenode.http-address", Uri.http_namenode(services).fix_host(onefs_host))
+      putHdfsSiteProperty("dfs.namenode.https-address", Uri.https_namenode(services).fix_host(onefs_host))
+
+    def getServiceConfigurationsValidationItems(self, configs, recommendedDefaults, services, hosts):
+      validation_errors = []
+      validation_errors.extend(self.toConfigurationValidationProblems(CoreSite(services).validate(), 'core-site'))
+      validation_errors.extend(self.toConfigurationValidationProblems(HdfsSite(services).validate(), 'hdfs-site'))
+      return validation_errors
+
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/themes/theme.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/themes/theme.json
new file mode 100644
index 0000000..8301bf0
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/themes/theme.json
@@ -0,0 +1,60 @@
+{
+  "name": "default",
+  "description": "Default theme for OneFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-onefs",
+                  "display-name": "OneFS",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-onefs-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "onefs/onefs_host",
+          "subsection-name": "subsection-onefs-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "onefs/onefs_host",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
+
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/test/python/TestServiceAdvisor.py b/contrib/management-packs/isilon-onefs-mpack/src/test/python/TestServiceAdvisor.py
new file mode 100644
index 0000000..26714b3
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/test/python/TestServiceAdvisor.py
@@ -0,0 +1,67 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from unittest import TestCase
+from service_advisor import Uri, CoreSite, HdfsSite
+
+configs = {
+  'configurations': {
+    'core-site': {
+      'properties': {
+        'fs.defaultFS': 'hdfs://localhost:8020'
+      }
+    },
+    'hdfs-site': {
+      'properties': {
+        'dfs.namenode.http-address'  : 'scisilon.fqdn:8082',
+        'dfs.namenode.https-address' : 'scisilon.fqdn:8080',
+      }
+    },
+    'onefs': {
+      'properties': {
+        'onefs_host': 'scisilon.fqdn'
+      }
+    }
+  }
+}
+
+class TestUri(TestCase):
+  def test_fix_host(self):
+    onefs_host = Uri.onefs(configs)
+    default_fs = Uri.default_fs(configs)
+    fixed = default_fs.fix_host(onefs_host)
+    self.assertEquals('hdfs://scisilon.fqdn:8020', fixed)
+
+  def test_skip_replacing_to_empty_host(self):
+    default_fs = Uri.default_fs(configs)
+    self.assertEquals('hdfs://localhost:8020', default_fs.fix_host(Uri("")))
+
+  def test_skip_fixing_invalid_host(self):
+    default_fs = Uri("hdfs://:8080")
+    self.assertEquals('hdfs://:8080', default_fs.fix_host(Uri("host")))
+
+  def test_core_site_validation_error_on_host_mismatch(self):
+    core_site = CoreSite(configs)
+    erros = core_site.validate()
+    self.assertEquals(len(erros), 1)
+    self.assertEquals(erros[0]['config-name'], 'fs.defaultFS')
+
+  def test_hdfs_site_no_validation_error(self):
+    hdfs_site = HdfsSite(configs)
+    erros = hdfs_site.validate()
+    self.assertEquals(len(erros), 0)
\ No newline at end of file
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/test/python/unitTests.py b/contrib/management-packs/isilon-onefs-mpack/src/test/python/unitTests.py
new file mode 100644
index 0000000..03bc923
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/test/python/unitTests.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import unittest
+import os
+import sys
+from random import shuffle
+import fnmatch
+
+#excluded directories with non-test staff from stack and service scanning,
+#also we can add service or stack to skip here
+STACK_EXCLUDE = ["utils"]
+SERVICE_EXCLUDE = ["configs"]
+
+TEST_MASK = '[Tt]est*.py'
+CUSTOM_TEST_MASK = '_[Tt]est*.py'
+def get_parent_path(base, directory_name):
+  """
+  Returns absolute path for directory_name, if directory_name present in base.
+  For example, base=/home/user/test2, directory_name=user - will return /home/user
+  """
+  done = False
+  while not done:
+    base = os.path.dirname(base)
+    if base == "/":
+      return None
+    if os.path.split(base)[-1] == directory_name:
+      done = True
+    else:
+      done = False
+  return base
+
+def get_test_files(path, mask = None, recursive=True):
+  """
+  Returns test files for path recursively
+  """
+  current = []
+  directory_items = os.listdir(path)
+  directory_items.sort()
+
+  for item in directory_items:
+    add_to_pythonpath = False
+    item_path = os.path.join(path, item)
+    if os.path.isfile(item_path):
+      if fnmatch.fnmatch(item, mask):
+        add_to_pythonpath = True
+        current.append(item)
+    elif os.path.isdir(item_path):
+      if recursive:
+        current.extend(get_test_files(item_path, mask = mask))
+    if add_to_pythonpath:
+      sys.path.append(path)
+  return current
+
+
+def main():
+  custom_tests = False
+  if len(sys.argv) > 1:
+    if sys.argv[1] == "true":
+      custom_tests = True
+  pwd = os.path.abspath(os.path.dirname(__file__))
+
+  project_folder = get_parent_path(pwd,'isilon-onefs-mpack')
+  sys.path.append(project_folder + "/src/main/resources/addon-services/ONEFS/1.0.0")
+  sys.path.append(project_folder + "/src/test/python")
+  sys.path.append(project_folder + "/../../../ambari-agent/src/main/python")
+  sys.path.append(project_folder + "/../../../ambari-common/src/main/python")
+
+  has_failures = False
+  test_runs = 0
+  test_failures = []
+  test_errors = []
+  sys.stderr.write("Running tests\n")
+  if custom_tests:
+    test_mask = CUSTOM_TEST_MASK
+  else:
+    test_mask = TEST_MASK
+
+  tests = get_test_files(pwd, mask=test_mask, recursive=True)
+  shuffle(tests)
+  modules = [os.path.basename(s)[:-3] for s in tests]
+  suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in
+            modules]
+  testSuite = unittest.TestSuite(suites)
+  textRunner = unittest.TextTestRunner(verbosity=2).run(testSuite)
+  test_runs += textRunner.testsRun
+  test_errors.extend([(str(item[0]),str(item[1]),"ERROR") for item in textRunner.errors])
+  test_failures.extend([(str(item[0]),str(item[1]),"FAIL") for item in textRunner.failures])
+  tests_status = textRunner.wasSuccessful() and not has_failures
+
+  if not tests_status:
+    sys.stderr.write("----------------------------------------------------------------------\n")
+    sys.stderr.write("Failed tests:\n")
+  for failed_tests in [test_errors,test_failures]:
+    for err in failed_tests:
+      sys.stderr.write("{0}: {1}\n".format(err[2],err[0]))
+      sys.stderr.write("----------------------------------------------------------------------\n")
+      sys.stderr.write("{0}\n".format(err[1]))
+  sys.stderr.write("----------------------------------------------------------------------\n")
+  sys.stderr.write("Total run:{0}\n".format(test_runs))
+  sys.stderr.write("Total errors:{0}\n".format(len(test_errors)))
+  sys.stderr.write("Total failures:{0}\n".format(len(test_failures)))
+
+  if tests_status:
+    sys.stderr.write("OK\n")
+    exit_code = 0
+  else:
+    sys.stderr.write("ERROR\n")
+    exit_code = 1
+  return exit_code
+
+
+if __name__ == "__main__":
+  sys.exit(main())
+

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 14/15: AMBARI-22121. Create mpack for Isilon OneFS (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 97bd0d8623620f8689923b3732b642f2baea477b
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Fri Jan 5 16:28:56 2018 +0100

    AMBARI-22121. Create mpack for Isilon OneFS (amagyar)
---
 .../src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml     | 2 +-
 .../addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py     | 7 +++++++
 .../addon-services/ONEFS/1.0.0/package/scripts/params_linux.py     | 2 ++
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
index 06a90d9..907ef7c 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
@@ -32,7 +32,7 @@
           <category>CLIENT</category>
           <componentType>HCFS_CLIENT</componentType>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/onefs_client.py</script>
             <scriptType>PYTHON</scriptType>
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
index dbf1331..b8ae3de 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
@@ -16,6 +16,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from resource_management.libraries.functions import format
+from resource_management import File, StaticFile
 from resource_management.core.exceptions import ClientComponentHasNoStatus
 from resource_management.libraries.script import Script
 from resource_management.libraries.resources.xml_config import XmlConfig
@@ -51,6 +53,11 @@ class OneFsClient(Script):
             mode=0644
     )
 
+    File(format("{params.ambari_libs_dir}/fast-hdfs-resource.jar"),
+         mode=0644,
+         content=StaticFile("/var/lib/ambari-agent/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar")
+         )
+
 if __name__ == "__main__":
   OneFsClient().execute()
 
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
index 79aff8f..bfed8cf 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
@@ -47,6 +47,8 @@ default_fs = config['configurations']['core-site']['fs.defaultFS']
 java64_home = config['hostLevelParams']['java_home']
 java_exec = format("{java64_home}/bin/java")
 
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 03/15: AMBARI-22236. Expression parser support for JMXServerSide alerts (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 64bc95ca3142ceb05eae00f39566da04be21208b
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Tue Oct 17 16:15:15 2017 +0200

    AMBARI-22236. Expression parser support for JMXServerSide alerts (amagyar)
---
 .../ambari/server/alerts/JmxServerSideAlert.java   | 42 ++++++-------
 .../ambari/server/state/alert/AlertDefinition.java | 13 +++++
 .../ambari/server/state/alert/MetricSource.java    | 59 ++++++++++++++++++-
 .../ambari/server/state/alert/Reporting.java       | 16 ++---
 .../AlertDefinitionResourceProviderTest.java       |  4 +-
 .../ambari/server/state/alert/JmxInfoTest.java     | 68 ++++++++++++++++++++++
 6 files changed, 162 insertions(+), 40 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java
index a4b86f8..09eb0a4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/JmxServerSideAlert.java
@@ -23,6 +23,7 @@ import static java.util.Collections.singletonList;
 import java.net.URI;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
@@ -35,7 +36,6 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.alert.AlertDefinition;
 import org.apache.ambari.server.state.alert.AlertDefinitionFactory;
 import org.apache.ambari.server.state.alert.MetricSource;
-import org.apache.ambari.server.state.alert.Reporting;
 import org.apache.ambari.server.state.alert.ServerSource;
 import org.apache.ambari.server.state.services.MetricsRetrievalService;
 import org.slf4j.Logger;
@@ -64,41 +64,35 @@ public class JmxServerSideAlert extends AlertRunnable {
   List<Alert> execute(Cluster cluster, AlertDefinitionEntity entity) throws AmbariException {
     AlertDefinition alertDef = definitionFactory.coerce(entity);
     ServerSource serverSource = (ServerSource) alertDef.getSource();
-    URI jmxUrl = jmxUrl(cluster, serverSource);
-    JMXMetricHolder metricHolder = jmxMetric(serverSource, jmxUrl);
-    return metricHolder == null
-      ? emptyList()
-      : alerts(alertDef, serverSource.getJmxInfo(), metricHolder, serverSource.getReporting());
+    return buildAlert(jmxMetric(serverSource, cluster), serverSource.getJmxInfo(), alertDef)
+      .map(alert -> singletonList(alert))
+      .orElse(emptyList());
   }
 
-  private URI jmxUrl(Cluster cluster, ServerSource serverSource) throws AmbariException {
-    return serverSource.getUri().resolve(config(cluster)).resolve(serverSource.getJmxInfo().getUrlSuffix());
+  public Optional<Alert> buildAlert(Optional<JMXMetricHolder> metricHolder, MetricSource.JmxInfo jmxInfo, AlertDefinition alertDef) {
+    return metricHolder.flatMap(metric -> buildAlert(metric, jmxInfo, alertDef));
   }
 
-  private Map<String, Map<String, String>> config(Cluster cluster) throws AmbariException {
-    return configHelper.getEffectiveConfigProperties(cluster, configHelper.getEffectiveDesiredTags(cluster, null));
+  private Optional<Alert> buildAlert(JMXMetricHolder metricHolder, MetricSource.JmxInfo jmxInfo, AlertDefinition alertDef) {
+    List<Object> allMetrics = metricHolder.findAll(jmxInfo.getPropertyList());
+    return jmxInfo.eval(metricHolder).map(val -> alertDef.buildAlert(val.doubleValue(), allMetrics));
   }
 
-  private JMXMetricHolder jmxMetric(ServerSource serverSource, URI jmxUri) {
+  private Optional<JMXMetricHolder> jmxMetric(ServerSource serverSource, Cluster cluster) throws AmbariException {
+    URI jmxUri = jmxUrl(cluster, serverSource);
     URLStreamProvider streamProvider = new URLStreamProvider(
       serverSource.getUri().getConnectionTimeoutMsec(),
       serverSource.getUri().getReadTimeoutMsec(),
       ComponentSSLConfiguration.instance());
     metricsRetrievalService.submitRequest(MetricsRetrievalService.MetricSourceType.JMX, streamProvider, jmxUri.toString());
-    return metricsRetrievalService.getCachedJMXMetric(jmxUri.toString());
+    return Optional.ofNullable(metricsRetrievalService.getCachedJMXMetric(jmxUri.toString()));
+  }
+
+  private URI jmxUrl(Cluster cluster, ServerSource serverSource) throws AmbariException {
+    return serverSource.getUri().resolve(config(cluster)).resolve(serverSource.getJmxInfo().getUrlSuffix());
   }
 
-  private List<Alert> alerts(AlertDefinition alertDef, MetricSource.JmxInfo jmxInfo, JMXMetricHolder jmxMetricHolder, Reporting reporting) throws AmbariException {
-    List<Object> metrics = jmxMetricHolder.findAll(jmxInfo.getPropertyList());
-    if (metrics.isEmpty()) {
-      return emptyList();
-    }
-    if (metrics.get(0) instanceof Number) {
-      Alert alert = reporting.alert(((Number) metrics.get(0)).doubleValue(), metrics, alertDef);
-      return singletonList(alert);
-    } else {
-      LOG.info("Unsupported metrics value: {} when running alert: {}", metrics.get(0), alertDef);
-      return emptyList();
-    }
+  private Map<String, Map<String, String>> config(Cluster cluster) throws AmbariException {
+    return configHelper.getEffectiveConfigProperties(cluster, configHelper.getEffectiveDesiredTags(cluster, null));
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinition.java
index 665430d..f1f21a4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinition.java
@@ -18,7 +18,10 @@
 package org.apache.ambari.server.state.alert;
 
 import java.util.HashSet;
+import java.util.List;
 
+import org.apache.ambari.server.state.Alert;
+import org.apache.ambari.server.state.AlertState;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.gson.annotations.SerializedName;
@@ -354,6 +357,16 @@ public class AlertDefinition {
   }
 
   /**
+   * Map the incoming value to {@link AlertState} and generate an alert with that state.
+   */
+  public Alert buildAlert(double value, List<Object> args) {
+    Reporting reporting = source.getReporting();
+    Alert alert = new Alert(name, null, serviceName, componentName, null, reporting.state(value));
+    alert.setText(reporting.formatMessage(value, args));
+    return alert;
+  }
+
+  /**
    * Gets equality based on name only.
    *
    * @see #deeplyEquals(Object)
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java
index d7283fe..019f3b1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/MetricSource.java
@@ -17,8 +17,16 @@
  */
 package org.apache.ambari.server.state.alert;
 
+import static java.util.stream.Collectors.toMap;
+import static java.util.stream.IntStream.range;
+
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
+
+import org.apache.ambari.server.controller.jmx.JMXMetricHolder;
+import org.springframework.expression.spel.standard.SpelExpressionParser;
+import org.springframework.expression.spel.support.StandardEvaluationContext;
 
 import com.google.gson.annotations.SerializedName;
 
@@ -127,7 +135,7 @@ public class MetricSource extends Source {
     @SerializedName("property_list")
     private List<String> propertyList;
 
-    private String value;
+    private String value = "{0}";
 
     @SerializedName("url_suffix")
     private String urlSuffix = "/jmx";
@@ -136,8 +144,16 @@ public class MetricSource extends Source {
       return propertyList;
     }
 
-    public String getValue() {
-      return value;
+    public void setPropertyList(List<String> propertyList) {
+      this.propertyList = propertyList;
+    }
+
+    public void setValue(String value) {
+      this.value = value;
+    }
+
+    public Value getValue() {
+      return new Value(value);
     }
 
     @Override
@@ -159,5 +175,42 @@ public class MetricSource extends Source {
     public String getUrlSuffix() {
       return urlSuffix;
     }
+
+    public Optional<Number> eval(JMXMetricHolder jmxMetricHolder) {
+      List<Object> metrics = jmxMetricHolder.findAll(propertyList);
+      if (metrics.isEmpty()) {
+        return Optional.empty();
+      } else {
+        Object value = getValue().eval(metrics);
+        return value instanceof Number ? Optional.of((Number)value) : Optional.empty();
+      }
+    }
+  }
+
+  public static class Value {
+    private final String value;
+
+    public Value(String value) {
+      this.value = value;
+    }
+
+    /**
+     * Evaluate an expression like "{0}/({0} + {1}) * 100.0" where each positional argument represent a metrics value.
+     * The value is converted to SpEL syntax:
+     *  #var0/(#var0 + #var1) * 100.0
+     * then it is evaluated in the context of the metrics parameters.
+     */
+    public Object eval(List<Object> metrics) {
+      StandardEvaluationContext context = new StandardEvaluationContext();
+      context.setVariables(range(0, metrics.size()).boxed().collect(toMap(i -> "var" + i, metrics::get)));
+      return new SpelExpressionParser()
+        .parseExpression(value.replaceAll("(\\{(\\d+)\\})", "#var$2"))
+        .getValue(context);
+    }
+
+    @Override
+    public String toString() {
+      return value;
+    }
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
index 51d074e..a7e11e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/Reporting.java
@@ -23,7 +23,6 @@ import java.text.MessageFormat;
 import java.util.List;
 
 import org.apache.ambari.server.alerts.Threshold;
-import org.apache.ambari.server.state.Alert;
 import org.apache.ambari.server.state.AlertState;
 
 import com.google.gson.annotations.SerializedName;
@@ -208,16 +207,7 @@ public class Reporting {
     return true;
   }
 
-  /**
-   * Map the incoming value to {@link AlertState} and generate an alert with that state.
-   */
-  public Alert alert(double value, List<Object> args, AlertDefinition alertDef) {
-    Alert alert = new Alert(alertDef.getName(), null, alertDef.getServiceName(), alertDef.getComponentName(), null, state(value));
-    alert.setText(MessageFormat.format(message(value), args.toArray()));
-    return alert;
-  }
-
-  private AlertState state(double value) {
+  public AlertState state(double value) {
     return getThreshold().state(PERCENT == getType() ? value * 100 : value);
   }
 
@@ -225,6 +215,10 @@ public class Reporting {
     return new Threshold(getOk().getValue(), getWarning().getValue(), getCritical().getValue());
   }
 
+  public String formatMessage(double value, List<Object> args) {
+    return MessageFormat.format(message(value), args.toArray());
+  }
+
   private String message(double value) {
     switch (state(value)) {
       case OK:
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AlertDefinitionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AlertDefinitionResourceProviderTest.java
index 3ef2c48..e8ad651 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AlertDefinitionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AlertDefinitionResourceProviderTest.java
@@ -457,7 +457,7 @@ public class AlertDefinitionResourceProviderTest {
 
     // JMX
     requestProps.put("AlertDefinition/source/jmx/value",
-        source.getJmxInfo().getValue());
+        source.getJmxInfo().getValue().toString());
     requestProps.put("AlertDefinition/source/jmx/property_list",
         source.getJmxInfo().getPropertyList());
 
@@ -600,7 +600,7 @@ public class AlertDefinitionResourceProviderTest {
 
     // JMX
     requestProps.put("AlertDefinition/source/jmx/value",
-        source.getJmxInfo().getValue());
+        source.getJmxInfo().getValue().toString());
     requestProps.put("AlertDefinition/source/jmx/property_list",
         source.getJmxInfo().getPropertyList());
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alert/JmxInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alert/JmxInfoTest.java
new file mode 100644
index 0000000..308ab4f
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alert/JmxInfoTest.java
@@ -0,0 +1,68 @@
+package org.apache.ambari.server.state.alert;
+
+import static java.util.Arrays.asList;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+import java.util.HashMap;
+import java.util.Optional;
+
+import org.apache.ambari.server.controller.jmx.JMXMetricHolder;
+import org.junit.Test;
+
+public class JmxInfoTest {
+  private static final String JMX_PROP_NAME1 = "Hadoop:service=NameNode,name=FSNamesystem/CapacityUsed";
+  private static final String JMX_PROP_NAME2 = "Hadoop:service=NameNode,name=FSNamesystem/CapacityRemaining";
+
+  @Test
+  public void testFindJmxMetricsAndCalculateSimpleValue() throws Exception {
+    MetricSource.JmxInfo jmxInfo = jmxInfoWith("{1}");
+    JMXMetricHolder metrics = metrics(12.5, 3.5);
+    assertThat(jmxInfo.eval(metrics), is(Optional.of(3.5)));
+  }
+
+  @Test
+  public void testFindJmxMetricsAndCalculateComplexValue() throws Exception {
+    MetricSource.JmxInfo jmxInfo = jmxInfoWith("2 * ({0} + {1})");
+    JMXMetricHolder metrics = metrics(12.5, 2.5);
+    assertThat(jmxInfo.eval(metrics), is(Optional.of(30.0)));
+  }
+
+  @Test
+  public void testReturnsEmptyWhenJmxPropertyWasNotFound() throws Exception {
+    MetricSource.JmxInfo jmxInfo = new MetricSource.JmxInfo();
+    jmxInfo.setPropertyList(asList("notfound/notfound"));
+    JMXMetricHolder metrics = metrics(1, 2);
+    assertThat(jmxInfo.eval(metrics), is(Optional.empty()));
+  }
+
+  private MetricSource.JmxInfo jmxInfoWith(String value) {
+    MetricSource.JmxInfo jmxInfo = new MetricSource.JmxInfo();
+    jmxInfo.setValue(value);
+    jmxInfo.setPropertyList(asList(JMX_PROP_NAME1, JMX_PROP_NAME2));
+    return jmxInfo;
+  }
+
+  private JMXMetricHolder metrics(final double jmxValue1, final double jmxValue2) {
+    JMXMetricHolder metrics = new JMXMetricHolder();
+    metrics.setBeans(asList(
+      new HashMap<String,Object>() {{
+        put("name", name(JMX_PROP_NAME1));
+        put(key(JMX_PROP_NAME1), jmxValue1);
+      }},
+      new HashMap<String,Object>() {{
+        put("name", name(JMX_PROP_NAME2));
+        put(key(JMX_PROP_NAME2), jmxValue2);
+      }}
+    ));
+    return metrics;
+  }
+
+  private String name(String jmxProp) {
+    return jmxProp.split("/")[0];
+  }
+
+  private String key(String jmxProp) {
+    return jmxProp.split("/")[1];
+  }
+}
\ No newline at end of file

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 15/15: AMBARI-22339. Metrics for OneFS mpack (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 859d0503aa1ee829455c70e9eb475be75783a366
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Tue Jan 9 14:25:32 2018 +0100

    AMBARI-22339. Metrics for OneFS mpack (amagyar)
---
 .../addon-services/ONEFS/1.0.0/metrics.json        |  6 ++
 .../addon-services/ONEFS/1.0.0/widgets.json        | 65 ++++++----------------
 2 files changed, 24 insertions(+), 47 deletions(-)

diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
index 30a9f5c..daba529 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
@@ -193,6 +193,12 @@
               "metric" : "disk_free",
               "amsHostMetric" : true
             },
+            "metrics/disk/disk_percent" : {
+              "temporal" : true,
+              "pointInTime" : true,
+              "metric" : "disk_percent",
+              "amsHostMetric" : true
+            },
             "metrics/disk/read_count" : {
               "temporal" : true,
               "pointInTime" : true,
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
index fe2c52e..95aed22 100644
--- a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
@@ -6,67 +6,38 @@
       "section_name": "ONEFS_SUMMARY",
       "widgetLayoutInfo": [
         {
-          "widget_name": "NameNode GC count",
-          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
-          "widget_type": "GRAPH",
+          "widget_name": "OneFS Host Free Disk Space",
+          "description": "Percentage of available space on OneFS host",
+          "widget_type": "GAUGE",
           "is_visible": true,
           "metrics": [
             {
-              "name": "jvm.JvmMetrics.GcCount._rate",
-              "metric_path": "metrics/jvm/gcCount._rate",
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
               "service_name": "ONEFS",
               "component_name": "ONEFS_CLIENT"
             },
             {
-              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
               "service_name": "ONEFS",
               "component_name": "ONEFS_CLIENT"
             }
           ],
           "values": [
             {
-              "name": "GC total count",
-              "value": "${jvm.JvmMetrics.GcCount._rate}"
-            },
-            {
-              "name": "GC count of type major collection",
-              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
+              "name": "OneFS Host Disk Free Space",
+              "value": "${disk_free/disk_total}"
             }
           ],
           "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "NameNode GC time",
-          "description": "Total time taken by major type garbage collections in milliseconds.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
-              "metric_path": "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep._rate",
-              "service_name": "ONEFS",
-              "component_name": "ONEFS_CLIENT"
-            }
-          ],
-          "values": [
-            {
-              "name": "GC time in major collection",
-              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "graph_type": "LINE",
-            "time_range": "1"
+            "warning_threshold": "0.25",
+            "error_threshold": "0.1"
           }
         },
         {
-          "widget_name": "NameNode Heap",
-          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_name": "OneFS NameNode Memory",
+          "description": "The sum of the current memory allocated by the HDFS process and the sum of the memory limit that the HDFS process can allocate.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
@@ -85,11 +56,11 @@
           ],
           "values": [
             {
-              "name": "JVM heap committed",
+              "name": "Heap committed",
               "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
             },
             {
-              "name": "JVM heap used",
+              "name": "Heap used",
               "value": "${jvm.JvmMetrics.MemHeapUsedM}"
             }
           ],
@@ -100,8 +71,8 @@
           }
         },
         {
-          "widget_name": "NameNode Host Load",
-          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_name": "OneFS Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on OneFS host.",
           "widget_type": "GRAPH",
           "is_visible": true,
           "metrics": [
@@ -167,4 +138,4 @@
       ]
     }
   ]
-}
\ No newline at end of file
+}

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 09/15: AMBARI-22449. Improved service/component dependency support (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 145ebc0e447003cf3e5bc5dcb8508ef9cd979d89
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Thu Nov 16 15:35:41 2017 +0100

    AMBARI-22449. Improved service/component dependency support (amagyar)
---
 .../app/controllers/wizard/step4_controller.js     |  4 +--
 ambari-web/app/messages.js                         |  3 ++-
 ambari-web/app/models/stack_service.js             | 30 ++++++++++++----------
 3 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/ambari-web/app/controllers/wizard/step4_controller.js b/ambari-web/app/controllers/wizard/step4_controller.js
index d39164b..8a62f3b 100644
--- a/ambari-web/app/controllers/wizard/step4_controller.js
+++ b/ambari-web/app/controllers/wizard/step4_controller.js
@@ -480,11 +480,11 @@ App.WizardStep4Controller = Em.ArrayController.extend({
   needToAddMissingDependency: function (missingDependency, i18nSuffix, callback, id) {
     var self = this;
     var displayName = missingDependency.get('displayName');
-    if (missingDependency.hasMultipleOptions()) {
+    if (missingDependency.get('hasMultipleOptions')) {
       return this.showDependencyPopup(
         id,
         Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.header').format(displayName),
-        Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.body2').format(displayName, missingDependency.displayOptions()),
+        Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.body.multiOptions').format(displayName, missingDependency.get('displayOptions')),
         callback
       );
     } else {
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 9b9bd9f..e59f5fd 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -885,7 +885,8 @@ Em.I18n.translations = {
   'installer.step4.multipleDFS.popup.body':'You selected more than one file system. We will automatically select only {0}. Is this OK?',
   'installer.step4.serviceCheck.popup.header':'{0} Needed',
   'installer.step4.serviceCheck.popup.body':'You did not select {0}, but it is needed by other services you selected. We will automatically add {1}. Is this OK?',
-  'installer.step4.serviceCheck.popup.body2':'You did not select {0}, but it is needed by other services you selected. Select a compatible service from the following list: {1}',
+  'installer.step4.serviceCheck.popup.body.multiOptions':'You did not select {0}, but it is needed by other services you selected. Select a compatible service from the following list: {1}',
+  'installer.step4.hcfs.displayName':'a Hadoop Compatible File System',
   'installer.step4.limitedFunctionality.popup.header':'Limited Functionality Warning',
   'installer.step4.ambariMetricsCheck.popup.body':'Ambari Metrics collects metrics from the cluster and makes them available to Ambari.  If you do not install Ambari Metrics service, metrics will not be accessible from Ambari.  Are you sure you want to proceed without Ambari Metrics?',
   'installer.step4.ambariInfraCheck.popup.body':'Since Ambari Infra is not selected, you must supply your own Solr to make Atlas work. Are you sure you want to proceed?',
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index 46dfee5..a84816e 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -23,11 +23,11 @@ require('models/configs/objects/service_config_category');
 
 var Dependency = Ember.Object.extend({
   name: Ember.computed('service', function() {
-    return this.get('service').get('serviceName');
+    return this.get('service.serviceName');
   }),
 
   displayName: Ember.computed('service', function() {
-    return this.get('service').get('displayNameOnSelectServicePage');
+    return this.get('service.displayNameOnSelectServicePage');
   }),
 
   compatibleServices: function(services) {
@@ -40,8 +40,8 @@ var Dependency = Ember.Object.extend({
 });
 
 var HcfsDependency = Dependency.extend({
-  displayName: Ember.computed('service', function() {
-    return 'a Hadoop Compatible File System';
+  displayName: Ember.computed(function() {
+    return Em.I18n.t('installer.step4.hcfs.displayName');
   }),
 
   compatibleServices: function(services) {
@@ -58,17 +58,17 @@ Dependency.reopenClass({
 });
 
 var MissingDependency = Ember.Object.extend({
-  hasMultipleOptions: function() {
+  hasMultipleOptions: Ember.computed('compatibleServices', function() {
     return this.get('compatibleServices').length > 1;
-  },
+  }),
 
   selectFirstCompatible: function() {
     this.get('compatibleServices')[0].set('isSelected', true);
   },
 
-  displayOptions: function() {
+  displayOptions: Ember.computed('compatibleServices', function() {
     return this.get('compatibleServices').mapProperty('serviceName').join(', ');
-  }
+  })
 });
 
 /**
@@ -122,10 +122,14 @@ App.StackService = DS.Model.extend({
   dependentServiceNames: DS.attr('array', {defaultValue: []}),
 
   dependencies: function(availableServices) {
-    return this.get('requiredServices')
-      .map(function (serviceName) { return availableServices.findProperty('serviceName', serviceName)})
-      .filter(function (each) { return !!each })
-      .map(function (service) { return Dependency.fromService(service); });
+    var result = [];
+    this.get('requiredServices').forEach(function(serviceName) {
+      var service = availableServices.findProperty('serviceName', serviceName);
+      if (service) {
+        result.push(Dependency.fromService(service));
+      }
+    });
+    return result;
   },
 
   /**
@@ -144,7 +148,7 @@ App.StackService = DS.Model.extend({
   },
 
   _addMissingDependency: function(dependency, availableServices, missingDependencies) {
-    if(!missingDependencies.some(function(each) { return each.get('serviceName') == dependency.get('name'); })) {
+    if(!missingDependencies.someProperty('serviceName', dependency.get('name'))) {
       missingDependencies.push(MissingDependency.create({
          serviceName: dependency.get('name'),
          displayName: dependency.get('displayName'),

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 08/15: AMBARI-22449. Improved service/component dependency support (amagyar)

Posted by am...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

amagyar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit e1c9e86d51d8a2da3f39f2e9eea7be8f3dabd26e
Author: Attila Magyar <am...@hortonworks.com>
AuthorDate: Thu Nov 16 12:27:13 2017 +0100

    AMBARI-22449. Improved service/component dependency support (amagyar)
---
 .../app/controllers/wizard/step4_controller.js     | 115 +++++++++++----------
 ambari-web/app/messages.js                         |   3 +-
 ambari-web/app/models/stack_service.js             |  82 +++++++++++++++
 ambari-web/test/controllers/wizard/step4_test.js   |  16 +--
 4 files changed, 155 insertions(+), 61 deletions(-)

diff --git a/ambari-web/app/controllers/wizard/step4_controller.js b/ambari-web/app/controllers/wizard/step4_controller.js
index d82bf34..d39164b 100644
--- a/ambari-web/app/controllers/wizard/step4_controller.js
+++ b/ambari-web/app/controllers/wizard/step4_controller.js
@@ -386,61 +386,29 @@ App.WizardStep4Controller = Em.ArrayController.extend({
    */
   serviceDependencyValidation: function(callback) {
     var selectedServices = this.filterProperty('isSelected', true);
+    var availableServices = this.get('content');
     var missingDependencies = [];
-    var missingDependenciesDisplayName = [];
     selectedServices.forEach(function(service) {
-      var requiredServices = service.get('requiredServices');
-      if (!!requiredServices && requiredServices.length) {
-        requiredServices.forEach(function(_requiredService){
-          var requiredService = this.findProperty('serviceName', _requiredService);
-          if (requiredService) {
-            if(requiredService.get('isSelected') === false) {
-               if(missingDependencies.indexOf(_requiredService) === -1) {
-                 missingDependencies.push(_requiredService);
-                 missingDependenciesDisplayName.push(requiredService.get('displayNameOnSelectServicePage'));
-               }
-            }
-            else {
-               //required service is selected, remove the service error from errorObject array
-               var serviceName = requiredService.get('serviceName');
-               var serviceError = this.get('errorStack').filterProperty('id',"serviceCheck_"+serviceName);
-               if(serviceError) {
-                  this.get('errorStack').removeObject(serviceError[0]);
-               }
-            }
-          }
-        },this);
-      }
-    },this);
+      service.collectMissingDependencies(selectedServices, availableServices, missingDependencies);
+    });
+    this.cleanExistingServiceCheckErrors();
+    this.addServiceCheckErrors(missingDependencies, callback);
+  },
 
-    //create a copy of the errorStack, reset it
-    //and add the dependencies in the correct order
-    var errorStackCopy = this.get('errorStack');
-    this.set('errorStack', []);
+  cleanExistingServiceCheckErrors() {
+    var existingServiceCheckErrors = this.get('errorStack').filter(function (error) {
+      return error.id.startsWith('serviceCheck_');
+    });
+    this.get('errorStack').removeObjects(existingServiceCheckErrors);
+  },
 
-    if (missingDependencies.length > 0) {
-      for(var i = 0; i < missingDependencies.length; i++) {
-        this.addValidationError({
-          id: 'serviceCheck_' + missingDependencies[i],
-          callback: this.needToAddServicePopup,
-          callbackParams: [{serviceName: missingDependencies[i], selected: true}, 'serviceCheck', missingDependenciesDisplayName[i], callback]
-        });
-      }
-    }
-
-    //iterate through the errorStackCopy array and add to errorStack array, the error objects that have no matching entry in the errorStack 
-    //and that are not related to serviceChecks since serviceCheck errors have already been added when iterating through the missing dependencies list
-    //Only add Ranger, Ambari Metrics, Spark and file system service validation errors if they exist in the errorStackCopy array
-    var ctr = 0;
-    while(ctr < errorStackCopy.length) {
-      //no matching entry in errorStack array
-      if (!this.get('errorStack').someProperty('id', errorStackCopy[ctr].id)) {
-        //not serviceCheck error
-        if(!errorStackCopy[ctr].id.startsWith('serviceCheck_')) {
-          this.get('errorStack').push(this.createError(errorStackCopy[ctr]));
-        }
-      }
-      ctr++;
+  addServiceCheckErrors(missingDependencies, callback) {
+    for(var i = 0; i < missingDependencies.length; i++) {
+      this.addValidationError({
+        id: 'serviceCheck_' + missingDependencies[i].get('serviceName'),
+        callback: this.needToAddMissingDependency,
+        callbackParams: [missingDependencies[i], 'serviceCheck', callback]
+      });
     }
   },
 
@@ -486,7 +454,7 @@ App.WizardStep4Controller = Em.ArrayController.extend({
     return App.ModalPopup.show({
       'data-qa': 'need-add-service-confirmation-modal',
       header: Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.header').format(serviceName),
-      body: Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.body').format(serviceName),
+      body: Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.body').format(serviceName, serviceName),
       onPrimary: function () {
         Em.makeArray(services).forEach(function (service) {
           self.findProperty('serviceName', service.serviceName).set('isSelected', service.selected);
@@ -509,6 +477,49 @@ App.WizardStep4Controller = Em.ArrayController.extend({
     });
   },
 
+  needToAddMissingDependency: function (missingDependency, i18nSuffix, callback, id) {
+    var self = this;
+    var displayName = missingDependency.get('displayName');
+    if (missingDependency.hasMultipleOptions()) {
+      return this.showDependencyPopup(
+        id,
+        Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.header').format(displayName),
+        Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.body2').format(displayName, missingDependency.displayOptions()),
+        callback
+      );
+    } else {
+      return this.showDependencyPopup(
+        id,
+        Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.header').format(displayName),
+        Em.I18n.t('installer.step4.' + i18nSuffix + '.popup.body').format(displayName, missingDependency.get('serviceName')),
+        callback,
+        function () {
+          missingDependency.selectFirstCompatible();
+          self.onPrimaryPopupCallback(callback, id);
+          this.hide();
+        }
+      );
+    }
+  },
+
+  showDependencyPopup: function(id, header, body, callback, primaryAction) {
+    return App.ModalPopup.show({
+        'data-qa': 'need-add-service-confirmation-modal',
+        header: header,
+        body: body,
+        onPrimary: primaryAction || function() { this.onClose(); },
+        onSecondary: function() {
+          this.onClose();
+        },
+        onClose: function() {
+          if (callback) {
+            callback(id);
+          }
+          this._super();
+        }
+      });
+  },
+
   /**
    * Show popup with info about not selected service
    * @param {function} callback
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index bfd4264..9b9bd9f 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -884,7 +884,8 @@ Em.I18n.translations = {
   'installer.step4.multipleDFS.popup.header':'Multiple File Systems Selected',
   'installer.step4.multipleDFS.popup.body':'You selected more than one file system. We will automatically select only {0}. Is this OK?',
   'installer.step4.serviceCheck.popup.header':'{0} Needed',
-  'installer.step4.serviceCheck.popup.body':'You did not select {0}, but it is needed by other services you selected. We will automatically add {0}. Is this OK?',
+  'installer.step4.serviceCheck.popup.body':'You did not select {0}, but it is needed by other services you selected. We will automatically add {1}. Is this OK?',
+  'installer.step4.serviceCheck.popup.body2':'You did not select {0}, but it is needed by other services you selected. Select a compatible service from the following list: {1}',
   'installer.step4.limitedFunctionality.popup.header':'Limited Functionality Warning',
   'installer.step4.ambariMetricsCheck.popup.body':'Ambari Metrics collects metrics from the cluster and makes them available to Ambari.  If you do not install Ambari Metrics service, metrics will not be accessible from Ambari.  Are you sure you want to proceed without Ambari Metrics?',
   'installer.step4.ambariInfraCheck.popup.body':'Since Ambari Infra is not selected, you must supply your own Solr to make Atlas work. Are you sure you want to proceed?',
diff --git a/ambari-web/app/models/stack_service.js b/ambari-web/app/models/stack_service.js
index 241f6ab..46dfee5 100644
--- a/ambari-web/app/models/stack_service.js
+++ b/ambari-web/app/models/stack_service.js
@@ -21,6 +21,56 @@ var stringUtils = require('utils/string_utils');
 require('utils/helper');
 require('models/configs/objects/service_config_category');
 
+var Dependency = Ember.Object.extend({
+  name: Ember.computed('service', function() {
+    return this.get('service').get('serviceName');
+  }),
+
+  displayName: Ember.computed('service', function() {
+    return this.get('service').get('displayNameOnSelectServicePage');
+  }),
+
+  compatibleServices: function(services) {
+    return services.filterProperty('serviceName', this.get('name'));
+  },
+
+  isMissing: function(selectedServices) {
+    return Em.isEmpty(this.compatibleServices(selectedServices));
+  }
+});
+
+var HcfsDependency = Dependency.extend({
+  displayName: Ember.computed('service', function() {
+    return 'a Hadoop Compatible File System';
+  }),
+
+  compatibleServices: function(services) {
+    return services.filterProperty("isDFS", true);
+  }
+});
+
+Dependency.reopenClass({
+  fromService: function(service) {
+    return service.get('isDFS')
+      ? HcfsDependency.create({service: service})
+      : Dependency.create({service: service});
+  }
+});
+
+var MissingDependency = Ember.Object.extend({
+  hasMultipleOptions: function() {
+    return this.get('compatibleServices').length > 1;
+  },
+
+  selectFirstCompatible: function() {
+    this.get('compatibleServices')[0].set('isSelected', true);
+  },
+
+  displayOptions: function() {
+    return this.get('compatibleServices').mapProperty('serviceName').join(', ');
+  }
+});
+
 /**
  * This model loads all services supported by the stack
  * The model maps to the  http://hostname:8080/api/v1/stacks/HDP/versions/${versionNumber}/services?fields=StackServices/*,serviceComponents/*
@@ -71,6 +121,38 @@ App.StackService = DS.Model.extend({
    */
   dependentServiceNames: DS.attr('array', {defaultValue: []}),
 
+  dependencies: function(availableServices) {
+    return this.get('requiredServices')
+      .map(function (serviceName) { return availableServices.findProperty('serviceName', serviceName)})
+      .filter(function (each) { return !!each })
+      .map(function (service) { return Dependency.fromService(service); });
+  },
+
+  /**
+   * Add dependencies which are not already included in selectedServices to the given missingDependencies collection
+  */
+  collectMissingDependencies: function(selectedServices, availableServices, missingDependencies) {
+    this._missingDependencies(selectedServices, availableServices).forEach(function (dependency) {
+      this._addMissingDependency(dependency, availableServices, missingDependencies);
+    }.bind(this));
+  },
+
+  _missingDependencies: function(selectedServices, availableServices) {
+    return this.dependencies(availableServices).filter(function(dependency) {
+      return dependency.isMissing(selectedServices);
+    });
+  },
+
+  _addMissingDependency: function(dependency, availableServices, missingDependencies) {
+    if(!missingDependencies.some(function(each) { return each.get('serviceName') == dependency.get('name'); })) {
+      missingDependencies.push(MissingDependency.create({
+         serviceName: dependency.get('name'),
+         displayName: dependency.get('displayName'),
+         compatibleServices: dependency.compatibleServices(availableServices)
+      }));
+    }
+  },
+
   // Is the service a distributed filesystem
   isDFS: function () {
     return this.get('serviceType') === 'HCFS' || ['HDFS', 'GLUSTERFS'].contains(this.get('serviceName'));
diff --git a/ambari-web/test/controllers/wizard/step4_test.js b/ambari-web/test/controllers/wizard/step4_test.js
index 0fad0c1..1cd4863 100644
--- a/ambari-web/test/controllers/wizard/step4_test.js
+++ b/ambari-web/test/controllers/wizard/step4_test.js
@@ -32,7 +32,7 @@ describe('App.WizardStep4Controller', function () {
   beforeEach(function() {
     controller = App.WizardStep4Controller.create();
     services.forEach(function(serviceName) {
-      controller.pushObject(Ember.Object.create({
+      controller.pushObject(App.StackService.createRecord({
         'serviceName':serviceName, 'isSelected': true, 'isHiddenOnSelectServicePage': false, 'isInstalled': false, 'isDisabled': 'HDFS' === serviceName, isDFS: 'HDFS' === serviceName
       }));
     });
@@ -43,7 +43,7 @@ describe('App.WizardStep4Controller', function () {
     modelSetup.setupStackServiceComponent();
     if (selectedServiceNames.contains('GLUSTERFS')) allServices.push('GLUSTERFS');
     allServices = allServices.map(function(serviceName) {
-      return [Ember.Object.create({
+      return [App.StackService.createRecord({
         'serviceName': serviceName,
         'isSelected': false,
         'canBeSelected': true,
@@ -51,7 +51,7 @@ describe('App.WizardStep4Controller', function () {
         isPrimaryDFS: serviceName === 'HDFS',
         isDFS: ['HDFS','GLUSTERFS'].contains(serviceName),
         isMonitoringService: ['GANGLIA'].contains(serviceName),
-        requiredServices: App.StackService.find(serviceName).get('requiredServices'),
+        requiredServices: App.StackService.find(serviceName).get('requiredServices') || [],
         displayNameOnSelectServicePage: App.format.role(serviceName, true),
         coSelectedServices: function() {
           return App.StackService.coSelected[this.get('serviceName')] || [];
@@ -167,7 +167,7 @@ describe('App.WizardStep4Controller', function () {
         beforeEach(function () {
           controller.clear();
           Object.keys(testCase.condition).forEach(function (id) {
-            controller.pushObject(Ember.Object.create({
+            controller.pushObject(App.StackService.createRecord({
               serviceName: id,
               isSelected: testCase.condition[id],
               canBeSelected: true,
@@ -1006,7 +1006,7 @@ describe('App.WizardStep4Controller', function () {
     });
 
     it('serviceValidation should not be called when service not selected', function() {
-      controller.pushObject(Em.Object.create({
+      controller.pushObject(App.StackService.createRecord({
         serviceName: 'S1',
         isSelected: false
       }));
@@ -1016,7 +1016,7 @@ describe('App.WizardStep4Controller', function () {
 
     it('serviceValidation should not be called when dependent service does not exist', function() {
       controller.pushObjects([
-        Em.Object.create({
+        App.StackService.createRecord({
           serviceName: 'S1',
           isSelected: true
         })
@@ -1027,7 +1027,7 @@ describe('App.WizardStep4Controller', function () {
 
     it('serviceValidation should not be called when dependent service is selected', function() {
       controller.pushObjects([
-        Em.Object.create({
+        App.StackService.createRecord({
           serviceName: 'S1',
           isSelected: true
         }),
@@ -1042,7 +1042,7 @@ describe('App.WizardStep4Controller', function () {
 
     it('serviceValidation should be called when dependent service is not selected', function() {
       controller.pushObjects([
-        Em.Object.create({
+        App.StackService.createRecord({
           serviceName: 'S1',
           isSelected: true
         }),

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.