You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ma...@apache.org on 2014/01/31 00:44:39 UTC
[7/7] git commit: AMBARI-4479. Sync configs for core hadoop services
for GlusterFS stack (Erin Boyd via mahadev)
AMBARI-4479. Sync configs for core hadoop services for GlusterFS stack (Erin Boyd via mahadev)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a013ff27
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a013ff27
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a013ff27
Branch: refs/heads/branch-1.4.4
Commit: a013ff27e820cbe9abc29422b902d9d76aed5c89
Parents: bd1af43
Author: Mahadev Konar <ma...@apache.org>
Authored: Thu Jan 30 15:43:04 2014 -0800
Committer: Mahadev Konar <ma...@apache.org>
Committed: Thu Jan 30 15:44:02 2014 -0800
----------------------------------------------------------------------
.../HDP/2.0.6.GlusterFS/role_command_order.json | 100 +
.../services/GANGLIA/configuration/global.xml | 51 +
.../services/GANGLIA/metainfo.xml | 4 +
.../GLUSTERFS/configuration/hadoop-policy.xml | 134 -
.../GLUSTERFS/configuration/hdfs-site.xml | 415 -
.../services/GLUSTERFS/metainfo.xml | 5 +-
.../services/HBASE/configuration/hbase-site.xml | 34 +-
.../2.0.6.GlusterFS/services/HBASE/metainfo.xml | 8 +-
.../2.0.6.GlusterFS/services/HBASE/metrics.json | 13635 +++++++++++++++++
.../services/HDFS/configuration/core-site.xml | 2 +-
.../services/HDFS/configuration/global.xml | 2 +-
.../services/HDFS/configuration/hdfs-site.xml | 44 +-
.../2.0.6.GlusterFS/services/HDFS/metainfo.xml | 9 +-
.../2.0.6.GlusterFS/services/HDFS/metrics.json | 7800 ++++++++++
.../services/HIVE/configuration/hive-site.xml | 29 +-
.../2.0.6.GlusterFS/services/HIVE/metainfo.xml | 8 +-
.../services/MAPREDUCE2/metainfo.xml | 7 +-
.../services/MAPREDUCE2/metrics.json | 383 +
.../services/NAGIOS/configuration/global.xml | 51 +
.../services/NAGIOS/metainfo.xml | 4 +
.../services/OOZIE/configuration/oozie-site.xml | 4 +-
.../2.0.6.GlusterFS/services/OOZIE/metainfo.xml | 5 +-
.../services/WEBHCAT/configuration/global.xml | 51 +
.../WEBHCAT/configuration/webhcat-site.xml | 2 +-
.../services/WEBHCAT/metainfo.xml | 6 +-
.../YARN/configuration/capacity-scheduler.xml | 8 +
.../services/YARN/configuration/yarn-site.xml | 6 +
.../2.0.6.GlusterFS/services/YARN/metainfo.xml | 8 +-
.../2.0.6.GlusterFS/services/YARN/metrics.json | 2534 +++
.../services/ZOOKEEPER/metainfo.xml | 4 +
30 files changed, 24753 insertions(+), 600 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
new file mode 100644
index 0000000..550f885
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/role_command_order.json
@@ -0,0 +1,100 @@
+{
+ "_comment" : "Record format:",
+ "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+ "general_deps" : {
+ "_comment" : "dependencies for all cases",
+ "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
+ "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+ "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+ "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+ "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
+ "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+ "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
+ "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+ "FLUME_SERVER-START": ["OOZIE_SERVER-START"],
+ "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
+ "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
+ "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
+ "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
+ "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
+ "WEBHCAT_SERVER-START", "FLUME_SERVER-START"],
+ "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
+ "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+ "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+ "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+ "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+ "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+ "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+ "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+ "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+ "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+ "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
+ "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
+ "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
+ "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
+ "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
+ "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
+ "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
+ "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
+ "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
+ "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
+ "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
+ "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
+ "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
+ "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
+ "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
+ "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
+ "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
+ "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
+ "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+ "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
+ },
+ "_comment" : "GLUSTERFS-specific dependencies",
+ "optional_glusterfs": {
+ "HBASE_MASTER-START": ["PEERSTATUS-START"],
+ "JOBTRACKER-START": ["PEERSTATUS-START"],
+ "TASKTRACKER-START": ["PEERSTATUS-START"],
+ "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
+ "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
+ },
+ "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+ "optional_no_glusterfs": {
+ "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+ "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+ "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+ "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+ "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+ "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+ "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+ "HIVE_SERVER-START": ["DATANODE-START"],
+ "WEBHCAT_SERVER-START": ["DATANODE-START"],
+ "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
+ "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
+ "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+ "SECONDARY_NAMENODE-START"],
+ "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+ "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+ "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+ "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+ "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+ "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+ "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+ "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+ "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
+ "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
+ "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
+ "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
+ },
+ "_comment" : "Dependencies that are used in HA NameNode cluster",
+ "optional_ha": {
+ "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+ "ZKFC-START": ["NAMENODE-START"],
+ "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
+ "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/configuration/global.xml
new file mode 100644
index 0000000..18eae57
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/configuration/global.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+ <property>
+ <name>ganglia_conf_dir</name>
+ <value>/etc/ganglia/hdp</value>
+ <description></description>
+ </property>
+ <property>
+ <name>ganglia_runtime_dir</name>
+ <value>/var/run/ganglia/hdp</value>
+ <description></description>
+ </property>
+ <property>
+ <name>gmetad_user</name>
+ <value>nobody</value>
+ <description></description>
+ </property>
+ <property>
+ <name>gmond_user</name>
+ <value>nobody</value>
+ <description></description>
+ </property>
+ <property>
+ <name>rrdcached_base_dir</name>
+ <value>/var/lib/ganglia/rrds</value>
+ <description>Location of rrd files.</description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/metainfo.xml
index 9f7444b..3e6d37a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GANGLIA/metainfo.xml
@@ -33,4 +33,8 @@
</components>
+ <configuration-dependencies>
+ <config-type>global</config-type>
+ </configuration-dependencies>
+
</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
- <property>
- <name>security.client.protocol.acl</name>
- <value>*</value>
- <description>ACL for ClientProtocol, which is used by user code
- via the DistributedFileSystem.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.client.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
- for block recovery.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for DatanodeProtocol, which is used by datanodes to
- communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.inter.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
- for updating generation timestamp.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.namenode.protocol.acl</name>
- <value>*</value>
- <description>ACL for NamenodeProtocol, the protocol used by the secondary
- namenode to communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.inter.tracker.protocol.acl</name>
- <value>*</value>
- <description>ACL for InterTrackerProtocol, used by the tasktrackers to
- communicate with the jobtracker.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.job.submission.protocol.acl</name>
- <value>*</value>
- <description>ACL for JobSubmissionProtocol, used by job clients to
- communciate with the jobtracker for job submission, querying job status etc.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.task.umbilical.protocol.acl</name>
- <value>*</value>
- <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
- tasks to communicate with the parent tasktracker.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.admin.operations.protocol.acl</name>
- <value></value>
- <description>ACL for AdminOperationsProtocol. Used for admin commands.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
- <property>
- <name>security.refresh.usertogroups.mappings.protocol.acl</name>
- <value></value>
- <description>ACL for RefreshUserMappingsProtocol. Used to refresh
- users mappings. The ACL is a comma-separated list of user and
- group names. The user and group list is separated by a blank. For
- e.g. "alice,bob users,wheel". A special value of "*" means all
- users are allowed.</description>
- </property>
-
-<property>
- <name>security.refresh.policy.protocol.acl</name>
- <value></value>
- <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
- dfsadmin and mradmin commands to refresh the security policy in-effect.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank. For e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- </property>
-
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hdfs-site.xml
deleted file mode 100644
index db92d4b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,415 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
- <property>
- <name>dfs.name.dir</name>
- <!-- cluster variant -->
- <value></value>
- <description>Determines where on the local filesystem the DFS name node
- should store the name table. If this is a comma-delimited list
- of directories then the name table is replicated in all of the
- directories, for redundancy. </description>
- <final>true</final>
- </property>
-
- <property>
- <name>dfs.support.append</name>
- <value></value>
- <description>to enable dfs append</description>
- <final>true</final>
- </property>
-
- <property>
- <name>dfs.webhdfs.enabled</name>
- <value></value>
- <description>to enable webhdfs</description>
- <final>true</final>
- </property>
-
- <property>
- <name>dfs.datanode.socket.write.timeout</name>
- <value>0</value>
- <description>DFS Client write socket timeout</description>
- </property>
-
- <property>
- <name>dfs.datanode.failed.volumes.tolerated</name>
- <value></value>
- <description>#of failed disks dn would tolerate</description>
- <final>true</final>
- </property>
-
- <property>
- <name>dfs.block.local-path-access.user</name>
- <value></value>
- <description>the user who is allowed to perform short
- circuit reads.
- </description>
- <final>true</final>
- </property>
-
- <property>
- <name>dfs.data.dir</name>
- <value></value>
- <description>Determines where on the local filesystem an DFS data node
- should store its blocks. If this is a comma-delimited
- list of directories, then data will be stored in all named
- directories, typically on different devices.
- Directories that do not exist are ignored.
- </description>
- <final>true</final>
- </property>
-
- <property>
- <name>dfs.hosts.exclude</name>
- <value></value>
- <description>Names a file that contains a list of hosts that are
- not permitted to connect to the namenode. The full pathname of the
- file must be specified. If the value is empty, no hosts are
- excluded.</description>
- </property>
-
- <property>
- <name>dfs.hosts</name>
- <value></value>
- <description>Names a file that contains a list of hosts that are
- permitted to connect to the namenode. The full pathname of the file
- must be specified. If the value is empty, all hosts are
- permitted.</description>
- </property>
-
- <property>
- <name>dfs.replication.max</name>
- <value>50</value>
- <description>Maximal block replication.
- </description>
- </property>
-
- <property>
- <name>dfs.replication</name>
- <value></value>
- <description>Default block replication.
- </description>
- </property>
-
- <property>
- <name>dfs.heartbeat.interval</name>
- <value>3</value>
- <description>Determines datanode heartbeat interval in seconds.</description>
- </property>
-
- <property>
- <name>dfs.safemode.threshold.pct</name>
- <value>1.0f</value>
- <description>
- Specifies the percentage of blocks that should satisfy
- the minimal replication requirement defined by dfs.replication.min.
- Values less than or equal to 0 mean not to start in safe mode.
- Values greater than 1 will make safe mode permanent.
- </description>
- </property>
-
- <property>
- <name>dfs.balance.bandwidthPerSec</name>
- <value>6250000</value>
- <description>
- Specifies the maximum amount of bandwidth that each datanode
- can utilize for the balancing purpose in term of
- the number of bytes per second.
- </description>
- </property>
-
- <property>
- <name>dfs.datanode.address</name>
- <value></value>
- </property>
-
- <property>
- <name>dfs.datanode.http.address</name>
- <value></value>
- </property>
-
- <property>
- <name>dfs.block.size</name>
- <value>134217728</value>
- <description>The default block size for new files.</description>
- </property>
-
- <property>
- <name>dfs.http.address</name>
- <value></value>
-<description>The name of the default file system. Either the
-literal string "local" or a host:port for NDFS.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value></value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>4096</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
- <description>
- Kerberos principal name for the secondary NameNode.
- </description>
- </property>
-
-
-<!--
- This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
- <property>
- <name>dfs.namenode.kerberos.https.principal</name>
- <value></value>
- <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
- </property>
-
- <property>
- <name>dfs.secondary.namenode.kerberos.https.principal</name>
- <value></value>
- <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
- </property>
-
- <property>
- <!-- cluster variant -->
- <name>dfs.secondary.http.address</name>
- <value></value>
- <description>Address of secondary namenode web server</description>
- </property>
-
- <property>
- <name>dfs.secondary.https.port</name>
- <value>50490</value>
- <description>The https port where secondary-namenode binds</description>
- </property>
-
- <property>
- <name>dfs.web.authentication.kerberos.principal</name>
- <value></value>
- <description>
- The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
- The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
- HTTP SPNEGO specification.
- </description>
- </property>
-
- <property>
- <name>dfs.web.authentication.kerberos.keytab</name>
- <value></value>
- <description>
- The Kerberos keytab file with the credentials for the
- HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
- </description>
- </property>
-
- <property>
- <name>dfs.datanode.kerberos.principal</name>
- <value></value>
- <description>
- The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
- </description>
- </property>
-
- <property>
- <name>dfs.namenode.keytab.file</name>
- <value></value>
- <description>
- Combined keytab file containing the namenode service and host principals.
- </description>
- </property>
-
- <property>
- <name>dfs.secondary.namenode.keytab.file</name>
- <value></value>
- <description>
- Combined keytab file containing the namenode service and host principals.
- </description>
- </property>
-
- <property>
- <name>dfs.datanode.keytab.file</name>
- <value></value>
- <description>
- The filename of the keytab file for the DataNode.
- </description>
- </property>
-
- <property>
- <name>dfs.https.port</name>
- <value>50470</value>
- <description>The https port where namenode binds</description>
-
- </property>
-
- <property>
- <name>dfs.https.address</name>
- <value></value>
- <description>The https address where namenode binds</description>
-
- </property>
-
- <property>
- <name>dfs.datanode.data.dir.perm</name>
- <value></value>
-<description>The permissions that should be there on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
- </property>
-
- <property>
- <name>dfs.access.time.precision</name>
- <value>0</value>
- <description>The access time for HDFS file is precise upto this value.
- The default value is 1 hour. Setting a value of 0 disables
- access times for HDFS.
- </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL for who all can view the default servlets in the HDFS</description>
-</property>
-
-<property>
- <name>ipc.server.read.threadpool.size</name>
- <value>5</value>
- <description></description>
-</property>
-
-<property>
- <name>dfs.datanode.failed.volumes.tolerated</name>
- <value>0</value>
- <description>Number of failed disks datanode would tolerate</description>
-</property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
index 2bf75a2..a8f6a43 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/metainfo.xml
@@ -25,6 +25,7 @@
<category>CLIENT</category>
</component>
</components>
-
-
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ </configuration-dependencies>
</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
index 4270410..bf4af7d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
@@ -22,7 +22,7 @@
<configuration>
<property>
<name>hbase.rootdir</name>
- <value></value>
+ <value>hdfs://localhost:8020/apps/hbase/data</value>
<description>The directory shared by region servers and into
which HBase persists. The URL should be 'fully-qualified'
to include the filesystem scheme. For example, to specify the
@@ -44,7 +44,7 @@
</property>
<property>
<name>hbase.tmp.dir</name>
- <value></value>
+ <value>/hadoop/hbase</value>
<description>Temporary directory on the local filesystem.
Change this setting to point to a location more permanent
than '/tmp' (The '/tmp' directory is often cleared on
@@ -69,14 +69,14 @@
</property>
<property>
<name>hbase.regionserver.global.memstore.upperLimit</name>
- <value></value>
+ <value>0.4</value>
<description>Maximum size of all memstores in a region server before new
updates are blocked and flushes are forced. Defaults to 40% of heap
</description>
</property>
<property>
<name>hbase.regionserver.handler.count</name>
- <value></value>
+ <value>60</value>
<description>Count of RPC Listener instances spun up on RegionServers.
Same property is used by the Master for count of master handlers.
Default is 10.
@@ -84,8 +84,8 @@
</property>
<property>
<name>hbase.hregion.majorcompaction</name>
- <value></value>
- <description>The time (in miliseconds) between 'major' compactions of all
+ <value>86400000</value>
+ <description>The time (in milliseconds) between 'major' compactions of all
HStoreFiles in a region. Default: 1 day.
Set to 0 to disable automated major compactions.
</description>
@@ -93,7 +93,7 @@
<property>
<name>hbase.regionserver.global.memstore.lowerLimit</name>
- <value></value>
+ <value>0.38</value>
<description>When memstores are being forced to flush to make room in
memory, keep flushing until we hit this mark. Defaults to 35% of heap.
This value equal to hbase.regionserver.global.memstore.upperLimit causes
@@ -103,7 +103,7 @@
</property>
<property>
<name>hbase.hregion.memstore.block.multiplier</name>
- <value></value>
+ <value>2</value>
<description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
time hbase.hregion.flush.size bytes. Useful preventing
runaway memstore during spikes in update traffic. Without an
@@ -114,7 +114,7 @@
</property>
<property>
<name>hbase.hregion.memstore.flush.size</name>
- <value></value>
+ <value>134217728</value>
<description>
Memstore will be flushed to disk if size of the memstore
exceeds this number of bytes. Value is checked by a thread that runs
@@ -123,7 +123,7 @@
</property>
<property>
<name>hbase.hregion.memstore.mslab.enabled</name>
- <value></value>
+ <value>true</value>
<description>
Enables the MemStore-Local Allocation Buffer,
a feature which works to prevent heap fragmentation under
@@ -133,7 +133,7 @@
</property>
<property>
<name>hbase.hregion.max.filesize</name>
- <value></value>
+ <value>10737418240</value>
<description>
Maximum HStoreFile size. If any one of a column families' HStoreFiles has
grown to exceed this value, the hosting HRegion is split in two.
@@ -142,7 +142,7 @@
</property>
<property>
<name>hbase.client.scanner.caching</name>
- <value></value>
+ <value>100</value>
<description>Number of rows that will be fetched when calling next
on a scanner if it is not served from (local, client) memory. Higher
caching values will enable faster scanners but will eat up more memory
@@ -164,7 +164,7 @@
</property>
<property>
<name>hbase.client.keyvalue.maxsize</name>
- <value></value>
+ <value>10485760</value>
<description>Specifies the combined maximum allowed size of a KeyValue
instance. This is to set an upper boundary for a single entry saved in a
storage file. Since they cannot be split it helps avoiding that a region
@@ -175,7 +175,7 @@
</property>
<property>
<name>hbase.hstore.compactionThreshold</name>
- <value></value>
+ <value>3</value>
<description>
If more than this number of HStoreFiles in any one HStore
(one HStoreFile is written per flush of memstore) then a compaction
@@ -193,7 +193,7 @@
<property>
<name>hbase.hstore.blockingStoreFiles</name>
- <value></value>
+ <value>10</value>
<description>
If more than this number of StoreFiles in any one Store
(one StoreFile is written per flush of MemStore) then updates are
@@ -203,7 +203,7 @@
</property>
<property>
<name>hfile.block.cache.size</name>
- <value></value>
+ <value>0.40</value>
<description>
Percentage of maximum heap (-Xmx setting) to allocate to block cache
used by HFile/StoreFile. Default of 0.25 means allocate 25%.
@@ -309,7 +309,7 @@
-->
<property>
<name>hbase.zookeeper.quorum</name>
- <value></value>
+ <value>localhost</value>
<description>Comma separated list of servers in the ZooKeeper Quorum.
For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
By default this is set to localhost for local and pseudo-distributed modes
http://git-wip-us.apache.org/repos/asf/ambari/blob/a013ff27/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml
index dfaa256..afe527d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/metainfo.xml
@@ -18,7 +18,7 @@
<metainfo>
<user>mapred</user>
<comment>Non-relational distributed database and centralized service for configuration management & synchronization</comment>
- <version>0.96.0.2.0.6.0</version>
+ <version>0.96.1.2.0.6.1</version>
<components>
<component>
@@ -36,5 +36,9 @@
<category>CLIENT</category>
</component>
</components>
-
+ <configuration-dependencies>
+ <config-type>global</config-type>
+ <config-type>hbase-site</config-type>
+ <config-type>hbase-policy</config-type>
+ </configuration-dependencies>
</metainfo>