Posted to commits@ambari.apache.org by sr...@apache.org on 2015/04/22 04:59:11 UTC
ambari git commit: AMBARI-10646. HBase config UI: Eliminate hbase.client.scanner.caching setting (srimanth)
Repository: ambari
Updated Branches:
refs/heads/trunk 375266ddc -> 1a02c9cce
AMBARI-10646. HBase config UI: Eliminate hbase.client.scanner.caching setting (srimanth)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1a02c9cc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1a02c9cc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1a02c9cc
Branch: refs/heads/trunk
Commit: 1a02c9cce64124bb8994aa9fa8b1699a2d2c468f
Parents: 375266d
Author: Srimanth Gunturi <sg...@hortonworks.com>
Authored: Tue Apr 21 19:56:08 2015 -0700
Committer: Srimanth Gunturi <sg...@hortonworks.com>
Committed: Tue Apr 21 19:56:08 2015 -0700
----------------------------------------------------------------------
.../0.96.0.2.0/configuration/hbase-env.xml | 4 +-
.../0.96.0.2.0/configuration/hbase-site.xml | 20 +++---
.../HDFS/2.1.0.2.0/configuration/hdfs-site.xml | 4 +-
.../services/HBASE/configuration/hbase-env.xml | 2 +-
.../HDP/2.2/services/HBASE/themes/theme.json | 23 ++-----
.../stacks/HDP/2.2/services/stack_advisor.py | 17 +++--
.../stacks/2.2/common/test_stack_advisor.py | 68 ++++++++++++++++----
7 files changed, 88 insertions(+), 50 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/1a02c9cc/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
index 105bb40..24705e7 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
@@ -35,7 +35,7 @@
<name>hbase_regionserver_heapsize</name>
<value>4096</value>
<description>HBase RegionServer Heap Size.</description>
- <display-name>HBase RegionServer Maximum Memory</display-name>
+ <display-name>RegionServer Maximum Memory</display-name>
<value-attributes>
<type>int</type>
<minimum>1024</minimum>
@@ -62,7 +62,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
<name>hbase_master_heapsize</name>
<value>4096</value>
<description>HBase Master Heap Size</description>
- <display-name>HBase Master Maximum Memory</display-name>
+ <display-name>Master Maximum Memory</display-name>
<value-attributes>
<type>int</type>
<minimum>1024</minimum>
http://git-wip-us.apache.org/repos/asf/ambari/blob/1a02c9cc/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
index eda02ca..9e67bf6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
@@ -89,6 +89,7 @@
<type>float</type>
<minimum>0</minimum>
<maximum>0.8</maximum>
+ <increment-step>0.01</increment-step>
</value-attributes>
</property>
<property>
@@ -188,7 +189,7 @@
</property>
<property>
<name>hbase.hregion.max.filesize</name>
- <value>1073741824</value>
+ <value>10737418240</value>
<description>
Maximum HStoreFile size. If any one of a column families' HStoreFiles has
grown to exceed this value, the hosting HRegion is split in two.
@@ -198,9 +199,9 @@
<value-attributes>
<type>int</type>
<minimum>1073741824</minimum>
- <maximum>10737418240</maximum>
+ <maximum>107374182400</maximum>
<unit>B</unit>
- <increment-step>268435456</increment-step>
+ <increment-step>1073741824</increment-step>
</value-attributes>
</property>
<property>
@@ -268,7 +269,7 @@
is run to rewrite all HStoreFiles files as one. Larger numbers
put off compaction but when it runs, it takes longer to complete.
</description>
- <display-name>Maximum Files in a Store before Compaction</display-name>
+ <display-name>Maximum Store Files before Minor Compaction</display-name>
<value-attributes>
<type>int</type>
<entries>
@@ -315,6 +316,7 @@
<type>float</type>
<minimum>0</minimum>
<maximum>0.8</maximum>
+ <increment-step>0.01</increment-step>
</value-attributes>
</property>
@@ -456,15 +458,15 @@
</property>
<property>
<name>hbase.rpc.timeout</name>
- <value>60000</value>
+ <value>90000</value>
<description>This is for the RPC layer to define how long HBase client applications
take for a remote call to time out. It uses pings to check connections
but will eventually throw a TimeoutException.</description>
- <display-name>HBase RPC Timeout</display-name>
+ <display-name>RPC Timeout</display-name>
<value-attributes>
<type>int</type>
- <minimum>30000</minimum>
- <maximum>300000</maximum>
+ <minimum>10000</minimum>
+ <maximum>180000</maximum>
<unit>milliseconds</unit>
<increment-step>10000</increment-step>
</value-attributes>
@@ -482,7 +484,7 @@
<value-attributes>
<type>int</type>
<minimum>30000</minimum>
- <maximum>300000</maximum>
+ <maximum>180000</maximum>
<unit>milliseconds</unit>
<increment-step>10000</increment-step>
</value-attributes>
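
The value-attributes adjusted above (type, minimum, maximum, unit, increment-step) are the hints Ambari's config UI uses to render these hbase-site properties as bounded sliders. Below is a minimal sketch of how a proposed value could be checked against such bounds; it is illustrative only, not Ambari's actual validation code, and the helper name is made up.

# Sketch (not Ambari code): validate a proposed property value against the
# <value-attributes> bounds shown in the hbase-site.xml hunks above.
def validate_against_attributes(value, attrs):
    # attrs is a dict built from the XML, e.g.
    # {'type': 'int', 'minimum': '10000', 'maximum': '180000', 'increment-step': '10000'}
    cast = int if attrs.get('type') == 'int' else float
    v = cast(value)
    lo = cast(attrs['minimum'])
    hi = cast(attrs['maximum'])
    if not lo <= v <= hi:
        return False, "value %s outside [%s, %s]" % (v, lo, hi)
    step = attrs.get('increment-step')
    if step is not None and attrs.get('type') == 'int':
        if (v - lo) % cast(step) != 0:
            return False, "value %s not on a %s step from %s" % (v, step, lo)
    return True, "ok"

# The new hbase.rpc.timeout default of 90000 ms fits the tightened
# 10000-180000 ms range with its 10000 ms step:
print(validate_against_attributes("90000", {
    'type': 'int', 'minimum': '10000', 'maximum': '180000',
    'increment-step': '10000'}))   # -> (True, 'ok')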
http://git-wip-us.apache.org/repos/asf/ambari/blob/1a02c9cc/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index c9d7172..0108b27 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -180,11 +180,11 @@
<entries>
<entry>
<value>0.99</value>
- <label>NN HA</label>
+ <label>0.99f</label>
</entry>
<entry>
<value>1.0</value>
- <label>No NN HA</label>
+ <label>1.0f</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
http://git-wip-us.apache.org/repos/asf/ambari/blob/1a02c9cc/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
index 6b6d53b..0120e82 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/hbase-env.xml
@@ -60,7 +60,7 @@ and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
<name>phoenix_sql_enabled</name>
<value>false</value>
<description>Enable Phoenix SQL</description>
- <display-name>Phoenix SQL</display-name>
+ <display-name>Enable Phoenix</display-name>
<value-attributes>
<type>value-list</type>
<entries>
http://git-wip-us.apache.org/repos/asf/ambari/blob/1a02c9cc/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/themes/theme.json
index 3da7470..e803292 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/themes/theme.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/themes/theme.json
@@ -16,7 +16,7 @@
"sections": [
{
"name": "section-hbase-memory",
- "display-name": "Memory",
+ "display-name": "Server",
"row-index": "0",
"column-index": "0",
"row-span": "1",
@@ -132,7 +132,7 @@
},
{
"name": "section-hbase-phoenix",
- "display-name": "Phoenix",
+ "display-name": "Phoenix SQL",
"row-index": "2",
"column-index": "2",
"row-span": "1",
@@ -173,7 +173,7 @@
},
{
"config": "hbase-site/hbase.regionserver.global.memstore.upperLimit",
- "subsection-name": "subsection-hbase-memory-col2"
+ "subsection-name": "subsection-hbase-memory-col1"
},
{
"config": "hbase-site/hbase.hregion.memstore.flush.size",
@@ -185,11 +185,7 @@
},
{
"config": "hbase-site/hbase.regionserver.handler.count",
- "subsection-name": "subsection-hbase-client-col1"
- },
- {
- "config": "hbase-site/hbase.client.scanner.caching",
- "subsection-name": "subsection-hbase-client-col1"
+ "subsection-name": "subsection-hbase-memory-col2"
},
{
"config": "hbase-site/hbase.client.retries.number",
@@ -311,17 +307,6 @@
}
},
{
- "config":"hbase-site/hbase.client.scanner.caching",
- "widget":{
- "type":"slider",
- "units":[
- {
- "unit-name":"int"
- }
- ]
- }
- },
- {
"config":"hbase-site/hbase.client.retries.number",
"widget":{
"type":"slider",
http://git-wip-us.apache.org/repos/asf/ambari/blob/1a02c9cc/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 6289e6a..1a8034d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -100,6 +100,10 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
putHdfsEnvProperty = self.putProperty(configurations, "hadoop-env", services)
putHdfsEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hadoop-env")
+ putHdfsEnvProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
+ putHdfsEnvProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
+ putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+
nn_max_heapsize=None
if (namenodeHosts is not None and len(namenodeHosts) > 0):
if len(namenodeHosts) > 1:
@@ -109,10 +113,13 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
putHdfsEnvPropertyAttribute('namenode_heapsize', 'maximum', nn_max_heapsize)
- #Old fallback values
- putHdfsEnvProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
- putHdfsEnvProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
- putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+ nn_heapsize = nn_max_heapsize
+ nn_heapsize -= clusterData["reservedRam"]
+ if clusterData["hBaseInstalled"]:
+ nn_heapsize -= clusterData["hbaseRam"]
+ putHdfsEnvProperty('namenode_heapsize', max(int(nn_heapsize / 2), 1024))
+ putHdfsEnvProperty('namenode_opt_newsize', max(int(nn_heapsize / 8), 128))
+ putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(nn_heapsize / 8), 256))
datanodeHosts = self.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
if datanodeHosts is not None and len(datanodeHosts) > 0:
@@ -154,7 +161,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
nn_memory_config = nn_memory_configs[index]
#override with new values if applicable
- if nn_max_heapsize is not None and nn_max_heapsize <= nn_memory_config['nn_heap']:
+ if nn_max_heapsize is not None and nn_memory_config['nn_heap'] <= nn_max_heapsize:
putHdfsEnvProperty('namenode_heapsize', nn_memory_config['nn_heap'])
putHdfsEnvProperty('namenode_opt_newsize', nn_memory_config['nn_opt'])
putHdfsEnvProperty('namenode_opt_maxnewsize', nn_memory_config['nn_opt'])
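
The stack_advisor.py hunks above reorder the NameNode heap recommendation: the totalAvailableRam-based values are now written first as plain defaults, and when the NameNode host's memory is known they are overwritten with values derived from nn_max_heapsize after subtracting the OS reservation and, if HBase is co-located, its RAM. The last hunk also fixes the comparison so a tabled nn_memory_configs entry is applied only when its nn_heap fits under that cap. Below is a condensed, standalone sketch of that flow; the function name is made up and only the clusterData keys match the patch.

# Condensed sketch of the new recommendation order (illustrative only; the
# real logic lives in HDP22StackAdvisor.recommendHDFSConfigurations).
def recommend_namenode_heap(cluster_data, nn_max_heapsize=None):
    # cluster_data uses the same keys as the patch, all sizes in MB:
    # totalAvailableRam, reservedRam, hBaseInstalled, hbaseRam.
    rec = {
        # 1. Defaults from total available RAM, written up front.
        'namenode_heapsize':       max(int(cluster_data['totalAvailableRam'] / 2), 1024),
        'namenode_opt_newsize':    max(int(cluster_data['totalAvailableRam'] / 8), 128),
        'namenode_opt_maxnewsize': max(int(cluster_data['totalAvailableRam'] / 8), 256),
    }
    # 2. If the NameNode host's memory is known, recompute from it after
    #    reserving RAM for the OS and a co-located HBase master.
    if nn_max_heapsize is not None:
        nn_heapsize = nn_max_heapsize - cluster_data['reservedRam']
        if cluster_data['hBaseInstalled']:
            nn_heapsize -= cluster_data['hbaseRam']
        rec['namenode_heapsize']       = max(int(nn_heapsize / 2), 1024)
        rec['namenode_opt_newsize']    = max(int(nn_heapsize / 8), 128)
        rec['namenode_opt_maxnewsize'] = max(int(nn_heapsize / 8), 256)
    return rec

print(recommend_namenode_heap(
    {'totalAvailableRam': 2048, 'reservedRam': 128,
     'hBaseInstalled': True, 'hbaseRam': 111},
    nn_max_heapsize=10240))
# -> namenode_heapsize 5000, namenode_opt_newsize/maxnewsize 1250

For the host sizes used in the updated test below (a 10240 MB NameNode host, 128 MB reserved, 111 MB for HBase), the capped path works out to a 5000 MB heap and 1250 MB new-generation sizes, the figures the third test case asserts.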
http://git-wip-us.apache.org/repos/asf/ambari/blob/1a02c9cc/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 0453b7a..4c79512 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -1438,7 +1438,9 @@ class TestHDP22StackAdvisor(TestCase):
}
clusterData = {
"totalAvailableRam": 2048,
- "hBaseInstalled": 111
+ "hBaseInstalled": True,
+ "hbaseRam": 111,
+ "reservedRam": 128
}
expected = {
'hadoop-env': {
@@ -1449,7 +1451,7 @@ class TestHDP22StackAdvisor(TestCase):
},
'property_attributes': {
'dtnode_heapsize': {'maximum': '2048'},
- 'namenode_heapsize': {'maximum': '1024'}
+ 'namenode_heapsize': {'maximum': '10240'}
}
},
'hdfs-site': {
@@ -1587,7 +1589,7 @@ class TestHDP22StackAdvisor(TestCase):
"ph_cpu_count" : 1,
"public_host_name" : "host2",
"rack_info" : "/default-rack",
- "total_mem" : 1048576
+ "total_mem" : 10485760
}
},
]
@@ -1595,9 +1597,9 @@ class TestHDP22StackAdvisor(TestCase):
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
- # namenode heapsize depends on # of datanodes
+ # Test 1 - namenode heapsize depends on # of datanodes
datanode_hostnames = services["services"][0]["components"][0]["StackServiceComponents"]["hostnames"] # datanode hostnames
- for i in xrange(200):
+ for i in xrange(10):
hostname = "datanode" + `i`
datanode_hostnames.append(hostname)
hosts['items'].append(
@@ -1616,15 +1618,57 @@ class TestHDP22StackAdvisor(TestCase):
}
)
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
- self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "47872")
- self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "6144")
- self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "6144")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "3072")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "512")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "512")
+ # Test 2 - add more datanodes
+ for i in xrange(11,30):
+ hostname = "datanode" + `i`
+ datanode_hostnames.append(hostname)
+ hosts['items'].append(
+ {
+ "href" : "/api/v1/hosts/" + hostname,
+ "Hosts" : {
+ "cpu_count" : 1,
+ "host_name" : hostname,
+ "os_arch" : "x86_64",
+ "os_type" : "centos6",
+ "ph_cpu_count" : 1,
+ "public_host_name" : hostname,
+ "rack_info" : "/default-rack",
+ "total_mem" : 2097152
+ }
+ }
+ )
# namenode_heapsize depends on number of disks used used by datanode
- configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"] = "/path1"
+ configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"] = "/path1,/path2,/path3,/path4"
+ self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "9984")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1280")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1280")
+ # Test 3 - more datanodes than host can handle
+ for i in xrange(31, 90):
+ hostname = "datanode" + `i`
+ datanode_hostnames.append(hostname)
+ hosts['items'].append(
+ {
+ "href" : "/api/v1/hosts/" + hostname,
+ "Hosts" : {
+ "cpu_count" : 1,
+ "host_name" : hostname,
+ "os_arch" : "x86_64",
+ "os_type" : "centos6",
+ "ph_cpu_count" : 1,
+ "public_host_name" : hostname,
+ "rack_info" : "/default-rack",
+ "total_mem" : 2097152
+ }
+ }
+ )
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
- self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "14848")
- self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "2048")
- self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "2048")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "5000")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1250")
+ self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1250")
def test_validateHDFSConfigurationsEnv(self):
configurations = {}
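
The updated test grows the cluster in three successive rounds, each time appending the same nine-field host dict inline. A helper of the kind one might factor out for readability is sketched below; the name and default are illustrative and not part of the patch.

# Illustrative helper (not in the patch): build the synthetic datanode
# entries the test appends to hosts['items'] in each round.
def make_datanode_host(hostname, total_mem_kb=2097152):
    return {
        "href": "/api/v1/hosts/" + hostname,
        "Hosts": {
            "cpu_count": 1,
            "host_name": hostname,
            "os_arch": "x86_64",
            "os_type": "centos6",
            "ph_cpu_count": 1,
            "public_host_name": hostname,
            "rack_info": "/default-rack",
            "total_mem": total_mem_kb
        }
    }

# e.g. the second round would reduce to:
# for i in xrange(11, 30):
#     hostname = "datanode" + `i`
#     datanode_hostnames.append(hostname)
#     hosts['items'].append(make_datanode_host(hostname))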