You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kylin.apache.org by li...@apache.org on 2015/02/12 06:22:00 UTC

[06/31] incubator-kylin git commit: Update sandbox settings to HDP 2.2

Update sandbox settings to HDP 2.2

Project: http://git-wip-us.apache.org/repos/asf/incubator-kylin/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-kylin/commit/fe026669
Tree: http://git-wip-us.apache.org/repos/asf/incubator-kylin/tree/fe026669
Diff: http://git-wip-us.apache.org/repos/asf/incubator-kylin/diff/fe026669

Branch: refs/heads/master
Commit: fe02666932ecceb0ebfdbdaf37aaf9ea22adef04
Parents: e63c66f
Author: Shao Feng, Shi <sh...@hotmail.com>
Authored: Tue Feb 3 15:38:51 2015 +0800
Committer: Shao Feng, Shi <sh...@hotmail.com>
Committed: Tue Feb 3 15:38:51 2015 +0800

----------------------------------------------------------------------
 .../sandbox/capacity-scheduler.xml              | 210 +++--
 examples/test_case_data/sandbox/core-site.xml   | 245 +++---
 .../test_case_data/sandbox/hadoop-policy.xml    | 278 ++-----
 .../test_case_data/sandbox/hbase-policy.xml     |  19 +
 examples/test_case_data/sandbox/hbase-site.xml  | 373 ++++-----
 examples/test_case_data/sandbox/hdfs-site.xml   | 440 ++++++----
 examples/test_case_data/sandbox/hive-site.xml   | 794 +++++++++++++++++++
 examples/test_case_data/sandbox/httpfs-site.xml |  17 -
 .../test_case_data/sandbox/kylin.properties     |   2 +-
 examples/test_case_data/sandbox/mapred-site.xml | 394 +++++----
 examples/test_case_data/sandbox/yarn-site.xml   | 656 +++++++++++----
 .../kylinolap/job/BuildCubeWithEngineTest.java  |   1 +
 .../java/com/kylinolap/rest/DebugTomcat.java    |   2 +
 13 files changed, 2334 insertions(+), 1097 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/capacity-scheduler.xml b/examples/test_case_data/sandbox/capacity-scheduler.xml
index cddca6c..7b4a367 100644
--- a/examples/test_case_data/sandbox/capacity-scheduler.xml
+++ b/examples/test_case_data/sandbox/capacity-scheduler.xml
@@ -1,111 +1,99 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.1</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <description>
-      The ResourceCalculator implementation to be used to compare 
-      Resources in the scheduler.
-      The default i.e. DefaultResourceCalculator only uses Memory while
-      DominantResourceCalculator uses dominant-resource to compare 
-      multi-dimensional resources such as Memory, CPU etc.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>-1</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler 
-      attempts to schedule rack-local containers. 
-      Typically this should be set to number of racks in the cluster, this 
-      feature is disabled by default, set to -1.
-    </description>
-  </property>
-
-</configuration>
+<!--Tue Dec 16 19:08:08 2014-->
+    <configuration>
+    
+    <property>
+      <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
+      <value>100</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+      <value>0.5</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.maximum-applications</name>
+      <value>10000</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.node-locality-delay</name>
+      <value>40</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.resource-calculator</name>
+      <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
+      <value>-1</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
+      <value>-1</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.capacity</name>
+      <value>100</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
+      <value> </value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default.capacity</name>
+      <value>100</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default.maximum-am-resource-percent</name>
+      <value>0.5</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+      <value>100</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default.state</name>
+      <value>RUNNING</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+      <value>1</value>
+    </property>
+    
+    <property>
+      <name>yarn.scheduler.capacity.root.queues</name>
+      <value>default</value>
+    </property>
+    
+  </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/core-site.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/core-site.xml b/examples/test_case_data/sandbox/core-site.xml
index c697835..1b3a3ee 100644
--- a/examples/test_case_data/sandbox/core-site.xml
+++ b/examples/test_case_data/sandbox/core-site.xml
@@ -1,93 +1,152 @@
-<!--Mon Feb 10 22:50:51 2014-->
-  <configuration>
-    <property>
-    <name>hadoop.proxyuser.hcat.groups</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>hadoop.proxyuser.hcat.hosts</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-  </property>
-    <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
-  </property>
-    <property>
-    <name>hadoop.proxyuser.hive.groups</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>hadoop.proxyuser.hue.hosts</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-  </property>
-    <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-  </property>
-    <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-  </property>
-    <property>
-    <name>hadoop.security.authorization</name>
-    <value>false</value>
-  </property>
-    <property>
-    <name>hadoop.proxyuser.oozie.groups</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT</value>
-  </property>
-    <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-  </property>
-    <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-  </property>
-    <property>
-    <name>fs.checkpoint.size</name>
-    <value>0.5</value>
-  </property>
-    <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://sandbox.hortonworks.com:8020</value>
-  </property>
-    <property>
-    <name>hadoop.proxyuser.oozie.hosts</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>hadoop.proxyuser.hive.hosts</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-    <property>
-    <name>hadoop.proxyuser.hue.groups</name>
-    <value>*</value>
-  </property>
-    <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-  </property>
-  </configuration>
\ No newline at end of file
+<!--Mon Jan 19 07:28:25 2015-->
+    <configuration>
+    
+    <property>
+      <name>fs.defaultFS</name>
+      <value>hdfs://sandbox.hortonworks.com:8020</value>
+      <final>true</final>
+    </property>
+    
+    <property>
+      <name>fs.trash.interval</name>
+      <value>360</value>
+    </property>
+    
+    <property>
+      <name>hadoop.http.authentication.simple.anonymous.allowed</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.falcon.groups</name>
+      <value>users</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.falcon.hosts</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hbase.groups</name>
+      <value>users</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hbase.hosts</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hcat.groups</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hcat.hosts</name>
+      <value>sandbox.hortonworks.com</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hive.groups</name>
+      <value>users</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hive.hosts</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hue.groups</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.hue.hosts</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.oozie.groups</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.oozie.hosts</name>
+      <value>sandbox.hortonworks.com</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.root.groups</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.proxyuser.root.hosts</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>hadoop.security.auth_to_local</name>
+      <value>
+        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+        DEFAULT
+    </value>
+    </property>
+    
+    <property>
+      <name>hadoop.security.authentication</name>
+      <value>simple</value>
+    </property>
+    
+    <property>
+      <name>hadoop.security.authorization</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>io.compression.codecs</name>
+      <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    </property>
+    
+    <property>
+      <name>io.file.buffer.size</name>
+      <value>131072</value>
+    </property>
+    
+    <property>
+      <name>io.serializations</name>
+      <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    </property>
+    
+    <property>
+      <name>ipc.client.connect.max.retries</name>
+      <value>50</value>
+    </property>
+    
+    <property>
+      <name>ipc.client.connection.maxidletime</name>
+      <value>30000</value>
+    </property>
+    
+    <property>
+      <name>ipc.client.idlethreshold</name>
+      <value>8000</value>
+    </property>
+    
+    <property>
+      <name>ipc.server.tcpnodelay</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.jobtracker.webinterface.trusted</name>
+      <value>false</value>
+    </property>
+    
+  </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/hadoop-policy.xml b/examples/test_case_data/sandbox/hadoop-policy.xml
index 1bcd78a..0f7e0b9 100644
--- a/examples/test_case_data/sandbox/hadoop-policy.xml
+++ b/examples/test_case_data/sandbox/hadoop-policy.xml
@@ -1,219 +1,59 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-
- Copyright 2011 The Apache Software Foundation
- 
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.ha.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HAService protocol used by HAAdmin to manage the
-      active and stand-by states of namenode.</description>
-  </property>
-
-  <property>
-    <name>security.zkfc.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for access to the ZK Failover Controller
-    </description>
-  </property>
-
-  <property>
-    <name>security.qjournal.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for QJournalProtocol, used by the NN to communicate with
-    JNs when using the QuorumJournalManager for edit logs.</description>
-  </property>
-
-  <property>
-    <name>security.mrhs.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HSClientProtocol, used by job clients to
-    communciate with the MR History Server job status etc. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <!-- YARN Protocols -->
-
-  <property>
-    <name>security.resourcetracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceTrackerProtocol, used by the
-    ResourceManager and NodeManager to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcemanager-administration.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationclient.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationClientProtocol, used by the ResourceManager 
-    and applications submission clients to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationmaster.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager 
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.containermanagement.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager 
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcelocalizer.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
-    and ResourceLocalizer to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for MRClientProtocol, used by job clients to
-    communciate with the MR ApplicationMaster to query job status etc. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-</configuration>
+<!--Tue Dec 16 19:07:40 2014-->
+    <configuration>
+    
+    <property>
+      <name>security.admin.operations.protocol.acl</name>
+      <value>hadoop</value>
+    </property>
+    
+    <property>
+      <name>security.client.datanode.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.client.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.datanode.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.inter.datanode.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.inter.tracker.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.job.client.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.job.task.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.namenode.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.refresh.policy.protocol.acl</name>
+      <value>hadoop</value>
+    </property>
+    
+    <property>
+      <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+      <value>hadoop</value>
+    </property>
+    
+  </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/hbase-policy.xml b/examples/test_case_data/sandbox/hbase-policy.xml
new file mode 100644
index 0000000..36756b8
--- /dev/null
+++ b/examples/test_case_data/sandbox/hbase-policy.xml
@@ -0,0 +1,19 @@
+<!--Mon Jan 19 07:29:07 2015-->
+    <configuration>
+    
+    <property>
+      <name>security.admin.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.client.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+    <property>
+      <name>security.masterregion.protocol.acl</name>
+      <value>*</value>
+    </property>
+    
+  </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/hbase-site.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/hbase-site.xml b/examples/test_case_data/sandbox/hbase-site.xml
index aafb16e..9fd97c9 100644
--- a/examples/test_case_data/sandbox/hbase-site.xml
+++ b/examples/test_case_data/sandbox/hbase-site.xml
@@ -1,179 +1,194 @@
-<configuration>
-
-	<property>
-		<name>hbase.master.info.bindAddress</name>
-		<value>0.0.0.0</value>
-	</property>
-
-	<property>
-		<name>hbase.hstore.flush.retries.number</name>
-		<value>120</value>
-	</property>
-
-	<property>
-		<name>hbase.client.keyvalue.maxsize</name>
-		<value>10485760</value>
-	</property>
-
-	<property>
-		<name>hbase.regionserver.keytab.file</name>
-		<value>/etc/security/keytabs/hbase.service.keytab</value>
-	</property>
-
-	<property>
-		<name>hbase.hstore.compactionThreshold</name>
-		<value>3</value>
-	</property>
-
-	<property>
-		<name>hbase.zookeeper.property.clientPort</name>
-		<value>2181</value>
-	</property>
-
-	<property>
-		<name>hbase.regionserver.handler.count</name>
-		<value>60</value>
-	</property>
-
-	<property>
-		<name>hbase.regionserver.global.memstore.lowerLimit</name>
-		<value>0.38</value>
-	</property>
-
-	<property>
-		<name>hbase.master.kerberos.principal</name>
-		<value>hbase/_HOST@EXAMPLE.COM</value>
-	</property>
-
-	<property>
-		<name>hbase.hregion.memstore.block.multiplier</name>
-		<value>2</value>
-	</property>
-
-	<property>
-		<name>hbase.hregion.memstore.flush.size</name>
-		<value>134217728</value>
-	</property>
-
-	<property>
-		<name>hbase.superuser</name>
-		<value>hbase</value>
-	</property>
-
-	<property>
-		<name>hbase.rootdir</name>
-		<value>hdfs://sandbox.hortonworks.com:8020/apps/hbase/data
-		</value>
-	</property>
-
-	<property>
-		<name>hbase.regionserver.kerberos.principal</name>
-		<value>hbase/_HOST@EXAMPLE.COM</value>
-	</property>
-
-	<property>
-		<name>hbase.regionserver.global.memstore.upperLimit</name>
-		<value>0.4</value>
-	</property>
-
-	<property>
-		<name>zookeeper.session.timeout</name>
-		<value>30000</value>
-	</property>
-
-	<property>
-		<name>hbase.client.scanner.caching</name>
-		<value>100</value>
-	</property>
-
-	<property>
-		<name>hbase.tmp.dir</name>
-		<value>/tmp/hbase</value>
-	</property>
-
-	<property>
-		<name>hfile.block.cache.size</name>
-		<value>0.40</value>
-	</property>
-
-	<property>
-		<name>hbase.hregion.max.filesize</name>
-		<value>10737418240</value>
-	</property>
-
-	<property>
-		<name>hbase.security.authentication</name>
-		<value>simple</value>
-	</property>
-
-	<property>
-		<name>hbase.defaults.for.version.skip</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>hbase.master.info.port</name>
-		<value>60010</value>
-	</property>
-
-	<property>
-		<name>hbase.zookeeper.quorum</name>
-		<value>sandbox.hortonworks.com</value>
-	</property>
-
-	<property>
-		<name>hbase.regionserver.info.port</name>
-		<value>60030</value>
-	</property>
-
-	<property>
-		<name>zookeeper.znode.parent</name>
-		<value>/hbase-unsecure</value>
-	</property>
-
-	<property>
-		<name>hbase.hstore.blockingStoreFiles</name>
-		<value>10</value>
-	</property>
-
-	<property>
-		<name>hbase.hregion.majorcompaction</name>
-		<value>86400000</value>
-	</property>
-
-	<property>
-		<name>hbase.security.authorization</name>
-		<value>false</value>
-	</property>
-
-	<property>
-		<name>hbase.master.keytab.file</name>
-		<value>/etc/security/keytabs/hbase.service.keytab</value>
-	</property>
-
-	<property>
-		<name>hbase.local.dir</name>
-		<value>${hbase.tmp.dir}/local</value>
-	</property>
-
-	<property>
-		<name>hbase.cluster.distributed</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>hbase.hregion.memstore.mslab.enabled</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.domain.socket.path</name>
-		<value>/var/lib/hadoop-hdfs/dn_socket</value>
-	</property>
-
-	<property>
-		<name>hbase.zookeeper.useMulti</name>
-		<value>true</value>
-	</property>
-
-</configuration>
\ No newline at end of file
+<!--Mon Jan 19 07:29:07 2015-->
+    <configuration>
+    
+    <property>
+      <name>dfs.domain.socket.path</name>
+      <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    </property>
+    
+    <property>
+      <name>hbase.client.keyvalue.maxsize</name>
+      <value>10485760</value>
+    </property>
+    
+    <property>
+      <name>hbase.client.scanner.caching</name>
+      <value>100</value>
+    </property>
+    
+    <property>
+      <name>hbase.cluster.distributed</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hbase.coprocessor.master.classes</name>
+      <value>com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor</value>
+    </property>
+    
+    <property>
+      <name>hbase.coprocessor.region.classes</name>
+      <value>com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor</value>
+    </property>
+    
+    <property>
+      <name>hbase.defaults.for.version.skip</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hbase.hregion.majorcompaction</name>
+      <value>604800000</value>
+    </property>
+    
+    <property>
+      <name>hbase.hregion.majorcompaction.jitter</name>
+      <value>0.50</value>
+    </property>
+    
+    <property>
+      <name>hbase.hregion.max.filesize</name>
+      <value>10737418240</value>
+    </property>
+    
+    <property>
+      <name>hbase.hregion.memstore.block.multiplier</name>
+      <value>4</value>
+    </property>
+    
+    <property>
+      <name>hbase.hregion.memstore.flush.size</name>
+      <value>134217728</value>
+    </property>
+    
+    <property>
+      <name>hbase.hregion.memstore.mslab.enabled</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hbase.hstore.blockingStoreFiles</name>
+      <value>10</value>
+    </property>
+    
+    <property>
+      <name>hbase.hstore.compactionThreshold</name>
+      <value>3</value>
+    </property>
+    
+    <property>
+      <name>hbase.local.dir</name>
+      <value>${hbase.tmp.dir}/local</value>
+    </property>
+    
+    <property>
+      <name>hbase.master.info.bindAddress</name>
+      <value>0.0.0.0</value>
+    </property>
+    
+    <property>
+      <name>hbase.master.info.port</name>
+      <value>60010</value>
+    </property>
+    
+    <property>
+      <name>hbase.master.port</name>
+      <value>60000</value>
+    </property>
+    
+    <property>
+      <name>hbase.regionserver.global.memstore.lowerLimit</name>
+      <value>0.38</value>
+    </property>
+    
+    <property>
+      <name>hbase.regionserver.global.memstore.upperLimit</name>
+      <value>0.4</value>
+    </property>
+    
+    <property>
+      <name>hbase.regionserver.handler.count</name>
+      <value>60</value>
+    </property>
+    
+    <property>
+      <name>hbase.regionserver.info.port</name>
+      <value>60030</value>
+    </property>
+    
+    <property>
+      <name>hbase.rootdir</name>
+      <value>hdfs://sandbox.hortonworks.com:8020/apps/hbase/data</value>
+    </property>
+    
+    <property>
+      <name>hbase.rpc.engine</name>
+      <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value>
+    </property>
+    
+    <property>
+      <name>hbase.rpc.protection</name>
+      <value>PRIVACY</value>
+    </property>
+    
+    <property>
+      <name>hbase.security.authentication</name>
+      <value>simple</value>
+    </property>
+    
+    <property>
+      <name>hbase.security.authorization</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hbase.superuser</name>
+      <value>hbase</value>
+    </property>
+    
+    <property>
+      <name>hbase.tmp.dir</name>
+      <value>/hadoop/hbase</value>
+    </property>
+    
+    <property>
+      <name>hbase.zookeeper.property.clientPort</name>
+      <value>2181</value>
+    </property>
+    
+    <property>
+      <name>hbase.zookeeper.quorum</name>
+      <value>sandbox.hortonworks.com</value>
+    </property>
+    
+    <property>
+      <name>hbase.zookeeper.useMulti</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hbase_master_heapsize</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>hbase_regionserver_heapsize</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>hfile.block.cache.size</name>
+      <value>0.40</value>
+    </property>
+    
+    <property>
+      <name>zookeeper.session.timeout</name>
+      <value>30000</value>
+    </property>
+    
+    <property>
+      <name>zookeeper.znode.parent</name>
+      <value>/hbase-unsecure</value>
+    </property>
+    
+  </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/hdfs-site.xml b/examples/test_case_data/sandbox/hdfs-site.xml
index 5450d82..f8347dc 100644
--- a/examples/test_case_data/sandbox/hdfs-site.xml
+++ b/examples/test_case_data/sandbox/hdfs-site.xml
@@ -1,175 +1,265 @@
-<!--Mon Feb 10 22:50:51 2014-->
-  <configuration>
-    <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-  </property>
-    <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-  </property>
-    <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-    <property>
-    <name>dfs.cluster.administrators</name>
-    <value>hdfs</value>
-  </property>
-    <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-  </property>
-    <property>
-    <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
-    <value>NEVER</value>
-  </property>
-    <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-  </property>
-    <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-  </property>
-    <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-  </property>
-    <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-  </property>
-    <property>
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>sandbox.hortonworks.com:50090</value>
-  </property>
-    <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-  </property>
-    <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-  </property>
-    <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-    <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-  </property>
-    <property>
-    <name>dfs.datanode.du.reserved</name>
-    <value>1073741824</value>
-  </property>
-    <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-  </property>
-    <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-  </property>
-    <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-  </property>
-    <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-  </property>
-    <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-  </property>
-    <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-  </property>
-    <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-  </property>
-    <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-  </property>
-    <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-  </property>
-    <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>5</value>
-  </property>
-    <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-  </property>
-    <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-  </property>
-    <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-  </property>
-    <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-  </property>
-    <property>
-    <name>dfs.namenode.https-address</name>
-    <value>sandbox.hortonworks.com:50470</value>
-  </property>
-  <property>
-    <name>dfs.permissions</name>
-    <value>false</value>
-  </property>
-    <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-  </property>
-    <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-  </property>
-    <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-    <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-  </property>
-    <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-  </property>
-    <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-  </property>
-    <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-  </property>
-    <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/grid/0/hdfs/journal</value>
-  </property>
-    <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-  </property>
-    <property>
-    <name>dfs.namenode.http-address</name>
-    <value>sandbox.hortonworks.com:50070</value>
-  </property>
-    <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-  </property>
-  </configuration>
\ No newline at end of file
+<!--Mon Jan 19 07:29:07 2015-->
+    <configuration>
+    
+    <property>
+      <name>dfs.block.access.token.enable</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>dfs.block.size</name>
+      <value>34217472</value>
+    </property>
+    
+    <property>
+      <name>dfs.blockreport.initialDelay</name>
+      <value>120</value>
+    </property>
+    
+    <property>
+      <name>dfs.blocksize</name>
+      <value>134217728</value>
+    </property>
+    
+    <property>
+      <name>dfs.client.read.shortcircuit</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+      <value>4096</value>
+    </property>
+    
+    <property>
+      <name>dfs.cluster.administrators</name>
+      <value> hdfs</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.address</name>
+      <value>0.0.0.0:50010</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.balance.bandwidthPerSec</name>
+      <value>6250000</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.data.dir</name>
+      <value>/hadoop/hdfs/data</value>
+      <final>true</final>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.data.dir.perm</name>
+      <value>750</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.du.reserved</name>
+      <value>1073741824</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.failed.volumes.tolerated</name>
+      <value>0</value>
+      <final>true</final>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.http.address</name>
+      <value>0.0.0.0:50075</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.https.address</name>
+      <value>0.0.0.0:50475</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.ipc.address</name>
+      <value>0.0.0.0:8010</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.max.transfer.threads</name>
+      <value>1024</value>
+    </property>
+    
+    <property>
+      <name>dfs.datanode.max.xcievers</name>
+      <value>1024</value>
+    </property>
+    
+    <property>
+      <name>dfs.domain.socket.path</name>
+      <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    </property>
+    
+    <property>
+      <name>dfs.heartbeat.interval</name>
+      <value>3</value>
+    </property>
+    
+    <property>
+      <name>dfs.hosts.exclude</name>
+      <value>/etc/hadoop/conf/dfs.exclude</value>
+    </property>
+    
+    <property>
+      <name>dfs.http.policy</name>
+      <value>HTTP_ONLY</value>
+    </property>
+    
+    <property>
+      <name>dfs.https.port</name>
+      <value>50470</value>
+    </property>
+    
+    <property>
+      <name>dfs.journalnode.edits.dir</name>
+      <value>/hadoop/hdfs/journalnode</value>
+    </property>
+    
+    <property>
+      <name>dfs.journalnode.http-address</name>
+      <value>0.0.0.0:8480</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.accesstime.precision</name>
+      <value>3600000</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.avoid.read.stale.datanode</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.avoid.write.stale.datanode</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.checkpoint.dir</name>
+      <value>/hadoop/hdfs/namesecondary</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.checkpoint.edits.dir</name>
+      <value>${dfs.namenode.checkpoint.dir}</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.checkpoint.period</name>
+      <value>21600</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.checkpoint.txns</name>
+      <value>1000000</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.handler.count</name>
+      <value>100</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.http-address</name>
+      <value>sandbox.hortonworks.com:50070</value>
+      <final>true</final>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.https-address</name>
+      <value>sandbox.hortonworks.com:50470</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.name.dir</name>
+      <value>/hadoop/hdfs/namenode</value>
+      <final>true</final>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.name.dir.restore</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.safemode.threshold-pct</name>
+      <value>1.0f</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.secondary.http-address</name>
+      <value>sandbox.hortonworks.com:50090</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.stale.datanode.interval</name>
+      <value>30000</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.startup.delay.block.deletion.sec</name>
+      <value>3600</value>
+    </property>
+    
+    <property>
+      <name>dfs.namenode.write.stale.datanode.ratio</name>
+      <value>1.0f</value>
+    </property>
+    
+    <property>
+      <name>dfs.nfs.exports.allowed.hosts</name>
+      <value>* rw</value>
+    </property>
+    
+    <property>
+      <name>dfs.nfs3.dump.dir</name>
+      <value>/tmp/.hdfs-nfs</value>
+    </property>
+    
+    <property>
+      <name>dfs.permissions.enabled</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>dfs.permissions.superusergroup</name>
+      <value>hdfs</value>
+    </property>
+    
+    <property>
+      <name>dfs.replication</name>
+      <value>1</value>
+    </property>
+    
+    <property>
+      <name>dfs.replication.max</name>
+      <value>50</value>
+    </property>
+    
+    <property>
+      <name>dfs.support.append</name>
+      <value>true</value>
+      <final>true</final>
+    </property>
+    
+    <property>
+      <name>dfs.webhdfs.enabled</name>
+      <value>true</value>
+      <final>true</final>
+    </property>
+    
+    <property>
+      <name>fs.permissions.umask-mode</name>
+      <value>022</value>
+    </property>
+    
+  </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/hive-site.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/hive-site.xml b/examples/test_case_data/sandbox/hive-site.xml
new file mode 100644
index 0000000..40afbf6
--- /dev/null
+++ b/examples/test_case_data/sandbox/hive-site.xml
@@ -0,0 +1,794 @@
+<!--Tue Dec 16 19:33:41 2014-->
+    <configuration>
+    
+    <property>
+      <name>ambari.hive.db.schema.name</name>
+      <value>hive</value>
+    </property>
+    
+    <property>
+      <name>datanucleus.cache.level2.type</name>
+      <value>none</value>
+    </property>
+    
+    <property>
+      <name>hive.auto.convert.join</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.auto.convert.join.noconditionaltask</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.auto.convert.join.noconditionaltask.size</name>
+      <value>1000000000</value>
+    </property>
+    
+    <property>
+      <name>hive.auto.convert.sortmerge.join</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.cbo.enable</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.cli.print.header</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.cluster.delegation.token.store.class</name>
+      <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
+    </property>
+    
+    <property>
+      <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
+      <value>sandbox.hortonworks.com:2181</value>
+    </property>
+    
+    <property>
+      <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
+      <value>/hive/cluster/delegation</value>
+    </property>
+    
+    <property>
+      <name>hive.compactor.abortedtxn.threshold</name>
+      <value>1000</value>
+    </property>
+    
+    <property>
+      <name>hive.compactor.check.interval</name>
+      <value>300s</value>
+    </property>
+    
+    <property>
+      <name>hive.compactor.delta.num.threshold</name>
+      <value>10</value>
+    </property>
+    
+    <property>
+      <name>hive.compactor.delta.pct.threshold</name>
+      <value>0.1f</value>
+    </property>
+    
+    <property>
+      <name>hive.compactor.initiator.on</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.compactor.worker.threads</name>
+      <value>0</value>
+    </property>
+    
+    <property>
+      <name>hive.compactor.worker.timeout</name>
+      <value>86400s</value>
+    </property>
+    
+    <property>
+      <name>hive.compute.query.using.stats</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.conf.restricted.list</name>
+      <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
+    </property>
+    
+    <property>
+      <name>hive.convert.join.bucket.mapjoin.tez</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.enforce.bucketing</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.enforce.sorting</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.enforce.sortmergebucketmapjoin</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.compress.intermediate</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.compress.output</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.dynamic.partition</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.dynamic.partition.mode</name>
+      <value>nonstrict</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.failure.hooks</name>
+      <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.max.created.files</name>
+      <value>100000</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.max.dynamic.partitions</name>
+      <value>5000</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.max.dynamic.partitions.pernode</name>
+      <value>2000</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.orc.compression.strategy</name>
+      <value>SPEED</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.orc.default.compress</name>
+      <value>ZLIB</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.orc.default.stripe.size</name>
+      <value>67108864</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.parallel</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.parallel.thread.number</name>
+      <value>8</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.post.hooks</name>
+      <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.pre.hooks</name>
+      <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.reducers.bytes.per.reducer</name>
+      <value>67108864</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.reducers.max</name>
+      <value>1009</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.scratchdir</name>
+      <value>/tmp/hive</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.submit.local.task.via.child</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.exec.submitviachild</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.execution.engine</name>
+      <value>mr</value>
+    </property>
+    
+    <property>
+      <name>hive.fetch.task.aggr</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.fetch.task.conversion</name>
+      <value>more</value>
+    </property>
+    
+    <property>
+      <name>hive.fetch.task.conversion.threshold</name>
+      <value>1073741824</value>
+    </property>
+    
+    <property>
+      <name>hive.heapsize</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>hive.limit.optimize.enable</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.limit.pushdown.memory.usage</name>
+      <value>0.04</value>
+    </property>
+    
+    <property>
+      <name>hive.map.aggr</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
+      <value>0.9</value>
+    </property>
+    
+    <property>
+      <name>hive.map.aggr.hash.min.reduction</name>
+      <value>0.5</value>
+    </property>
+    
+    <property>
+      <name>hive.map.aggr.hash.percentmemory</name>
+      <value>0.5</value>
+    </property>
+    
+    <property>
+      <name>hive.mapjoin.bucket.cache.size</name>
+      <value>10000</value>
+    </property>
+    
+    <property>
+      <name>hive.mapjoin.optimized.hashtable</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.mapred.reduce.tasks.speculative.execution</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.merge.mapfiles</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.merge.mapredfiles</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.merge.orcfile.stripe.level</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.merge.rcfile.block.level</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.merge.size.per.task</name>
+      <value>256000000</value>
+    </property>
+    
+    <property>
+      <name>hive.merge.smallfiles.avgsize</name>
+      <value>16000000</value>
+    </property>
+    
+    <property>
+      <name>hive.merge.tezfiles</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.authorization.storage.checks</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.cache.pinobjtypes</name>
+      <value>Table,Database,Type,FieldSchema,Order</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.client.connect.retry.delay</name>
+      <value>5s</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.client.socket.timeout</name>
+      <value>1800s</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.connect.retries</name>
+      <value>24</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.execute.setugi</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.failure.retries</name>
+      <value>24</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.kerberos.keytab.file</name>
+      <value>/etc/security/keytabs/hive.service.keytab</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.kerberos.principal</name>
+      <value>hive/_HOST@EXAMPLE.COM</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.pre.event.listeners</name>
+      <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.sasl.enabled</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.server.max.threads</name>
+      <value>100000</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.uris</name>
+      <value>thrift://sandbox.hortonworks.com:9083</value>
+    </property>
+    
+    <property>
+      <name>hive.metastore.warehouse.dir</name>
+      <value>/apps/hive/warehouse</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.bucketmapjoin</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.constant.propagation</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.index.filter</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.mapjoin.mapreduce</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.metadataonly</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.null.scan</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.reducededuplication</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.reducededuplication.min.reducer</name>
+      <value>4</value>
+    </property>
+    
+    <property>
+      <name>hive.optimize.sort.dynamic.partition</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.orc.compute.splits.num.threads</name>
+      <value>10</value>
+    </property>
+    
+    <property>
+      <name>hive.orc.splits.include.file.footer</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.prewarm.enabled</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.prewarm.numcontainers</name>
+      <value>10</value>
+    </property>
+    
+    <property>
+      <name>hive.security.authenticator.manager</name>
+      <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    </property>
+    
+    <property>
+      <name>hive.security.authorization.enabled</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.security.authorization.manager</name>
+      <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
+    </property>
+    
+    <property>
+      <name>hive.security.metastore.authenticator.manager</name>
+      <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
+    </property>
+    
+    <property>
+      <name>hive.security.metastore.authorization.auth.reads</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.security.metastore.authorization.manager</name>
+      <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.allow.user.substitution</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.authentication</name>
+      <value>NONE</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.authentication.spnego.keytab</name>
+      <value>/etc/security/keytabs/spnego.service.keytab</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.authentication.spnego.principal</name>
+      <value>HTTP/_HOST@EXAMPLE.COM</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.enable.doAs</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.enable.impersonation</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.logging.operation.enabled</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.logging.operation.log.location</name>
+      <value>${system:java.io.tmpdir}/${system:user.name}/operation_logs</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.support.dynamic.service.discovery</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.table.type.mapping</name>
+      <value>CLASSIC</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.tez.default.queues</name>
+      <value>default</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.tez.initialize.default.sessions</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.tez.sessions.per.default.queue</name>
+      <value>1</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.thrift.http.path</name>
+      <value>cliservice</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.thrift.http.port</name>
+      <value>10001</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.thrift.max.worker.threads</name>
+      <value>500</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.thrift.port</name>
+      <value>10000</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.thrift.sasl.qop</name>
+      <value>auth</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.transport.mode</name>
+      <value>binary</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.use.SSL</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.server2.zookeeper.namespace</name>
+      <value>hiveserver2</value>
+    </property>
+    
+    <property>
+      <name>hive.smbjoin.cache.rows</name>
+      <value>10000</value>
+    </property>
+    
+    <property>
+      <name>hive.stats.autogather</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.stats.dbclass</name>
+      <value>fs</value>
+    </property>
+    
+    <property>
+      <name>hive.stats.fetch.column.stats</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.stats.fetch.partition.stats</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.support.concurrency</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.auto.reducer.parallelism</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.container.size</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.cpu.vcores</name>
+      <value>-1</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.dynamic.partition.pruning</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.dynamic.partition.pruning.max.data.size</name>
+      <value>104857600</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
+      <value>1048576</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.input.format</name>
+      <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.java.opts</name>
+      <value>-server -Xmx200m -Djava.net.preferIPv4Stack=true</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.log.level</name>
+      <value>INFO</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.max.partition.factor</name>
+      <value>2.0</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.min.partition.factor</name>
+      <value>0.25</value>
+    </property>
+    
+    <property>
+      <name>hive.tez.smb.number.waves</name>
+      <value>0.5</value>
+    </property>
+    
+    <property>
+      <name>hive.txn.manager</name>
+      <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+    </property>
+    
+    <property>
+      <name>hive.txn.max.open.batch</name>
+      <value>1000</value>
+    </property>
+    
+    <property>
+      <name>hive.txn.timeout</name>
+      <value>300</value>
+    </property>
+    
+    <property>
+      <name>hive.user.install.directory</name>
+      <value>/user/</value>
+    </property>
+    
+    <property>
+      <name>hive.users.in.admin.role</name>
+      <value>hue,hive</value>
+    </property>
+    
+    <property>
+      <name>hive.vectorized.execution.enabled</name>
+      <value>true</value>
+    </property>
+    
+    <property>
+      <name>hive.vectorized.execution.reduce.enabled</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>hive.vectorized.groupby.checkinterval</name>
+      <value>4096</value>
+    </property>
+    
+    <property>
+      <name>hive.vectorized.groupby.flush.percent</name>
+      <value>0.1</value>
+    </property>
+    
+    <property>
+      <name>hive.vectorized.groupby.maxentries</name>
+      <value>100000</value>
+    </property>
+    
+    <property>
+      <name>hive.zookeeper.client.port</name>
+      <value>2181</value>
+    </property>
+    
+    <property>
+      <name>hive.zookeeper.namespace</name>
+      <value>hive_zookeeper_namespace</value>
+    </property>
+    
+    <property>
+      <name>hive.zookeeper.quorum</name>
+      <value>sandbox.hortonworks.com:2181</value>
+    </property>
+    
+    <property>
+      <name>hive_metastore_user_passwd</name>
+      <value>hive</value>
+    </property>
+    
+    <property>
+      <name>javax.jdo.option.ConnectionDriverName</name>
+      <value>com.mysql.jdbc.Driver</value>
+    </property>
+    
+    <property>
+      <name>javax.jdo.option.ConnectionPassword</name>
+      <value>hive</value>
+    </property>
+    
+    <property>
+      <name>javax.jdo.option.ConnectionURL</name>
+      <value>jdbc:mysql://sandbox.hortonworks.com/hive?createDatabaseIfNotExist=true</value>
+    </property>
+    
+    <property>
+      <name>javax.jdo.option.ConnectionUserName</name>
+      <value>hive</value>
+    </property>
+    
+  </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/httpfs-site.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/httpfs-site.xml b/examples/test_case_data/sandbox/httpfs-site.xml
deleted file mode 100644
index f27544f..0000000
--- a/examples/test_case_data/sandbox/httpfs-site.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/kylin.properties
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/kylin.properties b/examples/test_case_data/sandbox/kylin.properties
index 5317b27..c6b5e97 100644
--- a/examples/test_case_data/sandbox/kylin.properties
+++ b/examples/test_case_data/sandbox/kylin.properties
@@ -1,7 +1,7 @@
 ## Config for Kylin Engine ##
 
 # List of web servers in use, this enables one web server instance to sync up with other servers.
-kylin.rest.servers=sandbox:7070
+kylin.rest.servers=localhost:7070
 
 # The metadata store in hbase
 kylin.metadata.url=kylin_metadata_qa@hbase

http://git-wip-us.apache.org/repos/asf/incubator-kylin/blob/fe026669/examples/test_case_data/sandbox/mapred-site.xml
----------------------------------------------------------------------
diff --git a/examples/test_case_data/sandbox/mapred-site.xml b/examples/test_case_data/sandbox/mapred-site.xml
index 355f64b..ee3837c 100644
--- a/examples/test_case_data/sandbox/mapred-site.xml
+++ b/examples/test_case_data/sandbox/mapred-site.xml
@@ -1,155 +1,239 @@
-<!--Mon Feb 10 22:50:51 2014-->
-  <configuration>
-    <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>sandbox.hortonworks.com:19888</value>
-  </property>
-    <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-  </property>
-    <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-  </property>
-    <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-  </property>
-    <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-  </property>
-    <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-  </property>
-    <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-  </property>
-    <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-  </property>
-    <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-  </property>
-    <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-  </property>
-    <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-  </property>
-    <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx512m</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-  </property>
-    <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-  </property>
-    <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-  </property>
-    <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-  </property>
-    <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-  </property>
-    <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>/tmp/kylin/*,/usr/lib/hbase/lib/*</value>
-  </property>
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-  </property>
-    <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-  </property>
-    <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-  </property>
-    <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-    <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>sandbox.hortonworks.com:10020</value>
-  </property>
-    <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-  </property>
-    <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-  </property>
-    <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx312m</value>
-  </property>
-    <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx512m</value>
-  </property>
-    <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>250</value>
-  </property>
-    <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-    <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-  </property>
-  <property>
-	<name>mapreduce.app-submission.cross-platform</name>
-	<value>true</value>
-  </property>
-  </configuration>
+<!--Tue Dec 16 19:08:08 2014-->
+    <configuration>
+    
+    <property>
+      <name>io.sort.mb</name>
+      <value>64</value>
+    </property>
+    
+    <property>
+      <name>mapred.child.java.opts</name>
+      <value>-Xmx200m</value>
+    </property>
+    
+    <property>
+      <name>mapred.job.map.memory.mb</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>mapred.job.reduce.memory.mb</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.admin.map.child.java.opts</name>
+      <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.admin.reduce.child.java.opts</name>
+      <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.admin.user.env</name>
+      <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.am.max-attempts</name>
+      <value>2</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.application.classpath</name>
+      <value>/tmp/kylin/*,$HADOOP_CONF_DIR,/usr/hdp/current/hive-client/conf/,/usr/hdp/${hdp.version}/hive/lib/hive-metastore.jar,/usr/hdp/${hdp.version}/hive/lib/hive-exec.jar,/usr/hdp/${hdp.version}/hive-hcatalog/share/hcatalog/*,$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*,$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*,$PWD/mr-framework/hadoop/share/hadoop/common/*,$PWD/mr-framework/hadoop/share/hadoop/common/lib/*,$PWD/mr-framework/hadoop/share/hadoop/yarn/*,$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*,$PWD/mr-framework/hadoop/share/hadoop/hdfs/*,$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*,/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar,/etc/hadoop/conf/secure</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.application.framework.path</name>
+      <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.cluster.administrators</name>
+      <value> hadoop</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.framework.name</name>
+      <value>yarn</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.job.emit-timeline-data</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+      <value>0.05</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.jobhistory.address</name>
+      <value>sandbox.hortonworks.com:10020</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.jobhistory.bind-host</name>
+      <value>0.0.0.0</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.jobhistory.done-dir</name>
+      <value>/mr-history/done</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.jobhistory.intermediate-done-dir</name>
+      <value>/mr-history/tmp</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.jobhistory.webapp.address</name>
+      <value>sandbox.hortonworks.com:19888</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.map.java.opts</name>
+      <value>-Xmx200m</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.map.log.level</name>
+      <value>INFO</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.map.memory.mb</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.map.output.compress</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.map.sort.spill.percent</name>
+      <value>0.7</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.map.speculative</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.output.fileoutputformat.compress</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.output.fileoutputformat.compress.type</name>
+      <value>BLOCK</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.input.buffer.percent</name>
+      <value>0.0</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.java.opts</name>
+      <value>-Xmx200m</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.log.level</name>
+      <value>INFO</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.memory.mb</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
+      <value>1</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
+      <value>1000</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
+      <value>30000</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+      <value>0.7</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.shuffle.merge.percent</name>
+      <value>0.66</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.shuffle.parallelcopies</name>
+      <value>30</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.reduce.speculative</name>
+      <value>false</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.shuffle.port</name>
+      <value>13562</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.task.io.sort.factor</name>
+      <value>100</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.task.io.sort.mb</name>
+      <value>64</value>
+    </property>
+    
+    <property>
+      <name>mapreduce.task.timeout</name>
+      <value>300000</value>
+    </property>
+    
+    <property>
+      <name>yarn.app.mapreduce.am.admin-command-opts</name>
+      <value>-Dhdp.version=${hdp.version}</value>
+    </property>
+    
+    <property>
+      <name>yarn.app.mapreduce.am.command-opts</name>
+      <value>-Xmx200m</value>
+    </property>
+    
+    <property>
+      <name>yarn.app.mapreduce.am.log.level</name>
+      <value>INFO</value>
+    </property>
+    
+    <property>
+      <name>yarn.app.mapreduce.am.resource.mb</name>
+      <value>250</value>
+    </property>
+    
+    <property>
+      <name>yarn.app.mapreduce.am.staging-dir</name>
+      <value>/user</value>
+    </property>
+    
+  </configuration>