Posted to commits@ambari.apache.org by ma...@apache.org on 2014/08/08 19:10:39 UTC

[2/4] AMBARI-5934. Provide ability to rebalance HDFS.

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
new file mode 100644
index 0000000..2010c02
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
@@ -0,0 +1,29 @@
+Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
+Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
+Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
+Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
+Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
+Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
+Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
+Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
+Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
+Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
+Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
+Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
+Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
+Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
+Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
+Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
+Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
+Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
+Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
+Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
+The cluster is balanced. Exiting...
+Balancing took 24.858033333333335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
new file mode 100644
index 0000000..df173fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import time
+import sys
+from threading import Thread
+
+
+def write_function(path, handle, interval):
+  with open(path) as f:
+      for line in f:
+          handle.write(line)
+          handle.flush()
+          time.sleep(interval)
+          
+thread = Thread(target=write_function, args=('balancer.log', sys.stdout, 1))
+thread.start()
+
+threaderr = Thread(target=write_function, args=('balancer-err.log', sys.stderr, 0.3))
+threaderr.start()
+
+thread.join()  
+
+
+def rebalancer_out():
+  write_function('balancer.log', sys.stdout, 1)
+
+def rebalancer_err():
+  write_function('balancer-err.log', sys.stderr, 0.3)
\ No newline at end of file
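
For a quick local check of the emulator above, a minimal sketch (illustrative only, not part of this patch; it assumes it is run from the balancer-emulator directory so that balancer.log and balancer-err.log are found):

    import subprocess

    # Replays the canned balancer.log to stdout, the same stream that
    # namenode.py consumes when threshold == 'DEBUG'.
    proc = subprocess.Popen(['python', 'hdfs-command.py'], stdout=subprocess.PIPE)
    for line in iter(proc.stdout.readline, ''):
        print '[emulated balancer] %s' % line.rstrip()
    proc.wait()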

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
new file mode 100644
index 0000000..1dc545e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import re
+
+class HdfsParser():
+  def __init__(self):
+    self.initialLine = None
+    self.state = None
+  
+  def parseLine(self, line):
+    hdfsLine = HdfsLine()
+    type, matcher = hdfsLine.recognizeType(line)
+    if(type == HdfsLine.LineType.HeaderStart):
+      self.state = 'PROCESS_STARTED'
+    elif (type == HdfsLine.LineType.Progress):
+      self.state = 'PROGRESS'
+      hdfsLine.parseProgressLog(line, matcher)
+      if self.initialLine is None: self.initialLine = hdfsLine
+      
+      return hdfsLine 
+    elif (type == HdfsLine.LineType.ProgressEnd):
+      self.state = 'PROCESS_FINISHED'
+    return None
+    
+class HdfsLine():
+  
+  class LineType:
+    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
+  
+  
+  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
+  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(\.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
+  
+  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\s+Iteration#\s+Bytes Already Moved\s+Bytes Left To Move\s+Bytes Being Moved')
+  PROGRESS_PATTERN = re.compile(
+                            "(?P<date>.*?)\s+" + 
+                            "(?P<iteration>\d+)\s+" + 
+                            MEMORY_PATTERN % (1,1,1) + "\s+" + 
+                            MEMORY_PATTERN % (2,2,2) + "\s+" +
+                            MEMORY_PATTERN % (3,3,3)
+                            )
+  PROGRESS_END_PATTERN = re.compile('The cluster is balanced\. Exiting\.\.\.')
+  
+  def __init__(self):
+    self.date = None
+    self.iteration = None
+    self.bytesAlreadyMoved = None 
+    self.bytesLeftToMove = None
+    self.bytesBeingMoved = None 
+    self.bytesAlreadyMovedStr = None 
+    self.bytesLeftToMoveStr = None
+    self.bytesBeingMovedStr = None 
+  
+  def recognizeType(self, line):
+    for (type, pattern) in (
+                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
+                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN), 
+                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
+                            ):
+      m = re.match(pattern, line)
+      if m:
+        return type, m
+    return HdfsLine.LineType.Unknown, None
+    
+  def parseProgressLog(self, line, m):
+    '''
+    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
+    
+    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+    
+    Throws AmbariException in case of parsing errors
+
+    '''
+    m = re.match(self.PROGRESS_PATTERN, line)
+    if m:
+      self.date = m.group('date') 
+      self.iteration = int(m.group('iteration'))
+       
+      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1')) 
+      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2')) 
+      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
+       
+      self.bytesAlreadyMovedStr = m.group('memmult_1') 
+      self.bytesLeftToMoveStr = m.group('memmult_2')
+      self.bytesBeingMovedStr = m.group('memmult_3') 
+    else:
+      raise AmbariException("Failed to parse line [%s]") 
+  
+  def parseMemory(self, memorySize, multiplier_type):
+    try:
+      factor = self.MEMORY_SUFFIX.index(multiplier_type)
+    except ValueError:
+      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
+    
+    return float(memorySize) * (1024 ** factor)
+  def toJson(self):
+    return {
+            'timeStamp' : self.date,
+            'iteration' : self.iteration,
+            
+            'dataMoved': self.bytesAlreadyMovedStr,
+            'dataLeft' : self.bytesLeftToMoveStr,
+            'dataBeingMoved': self.bytesBeingMovedStr,
+            
+            'bytesMoved': self.bytesAlreadyMoved,
+            'bytesLeft' : self.bytesLeftToMove,
+            'bytesBeingMoved': self.bytesBeingMoved,
+          }
+  def __str__(self):
+    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file
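
As an illustration (not part of the patch), the parser above can be exercised directly with lines from the sample balancer.log, assuming hdfs_rebalance.py is on the Python path:

    import hdfs_rebalance

    parser = hdfs_rebalance.HdfsParser()
    lines = [
        'Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved',
        'Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB',
        'The cluster is balanced. Exiting...',
    ]
    for line in lines:
        parsed = parser.parseLine(line)
        if parsed:
            # e.g. {'iteration': 0, 'dataLeft': '5.74 GB', ...}
            print parsed.toJson()
    print parser.state  # 'PROCESS_FINISHED' after the final line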

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
index 1978881..3b320d1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
@@ -20,6 +20,13 @@ limitations under the License.
 from resource_management import *
 from hdfs_namenode import namenode
 from hdfs import hdfs
+import time
+import json
+import subprocess
+import hdfs_rebalance
+import sys
+import os
+from datetime import datetime
 
 
 class NameNode(Script):
@@ -66,6 +73,51 @@ class NameNode(Script):
     env.set_params(params)
     namenode(action="decommission")
     pass
+  
+  def rebalancehdfs(self, env):
+    import params
+    env.set_params(params)
 
+    name_node_parameters = json.loads( params.name_node_params )
+    threshold = name_node_parameters['threshold']
+    print "Starting balancer with threshold = %s" % threshold
+      
+    def calculateCompletePercent(first, current):
+      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
+    
+    
+    def startRebalancingProcess(threshold):
+      rebalanceCommand = format('hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      return ['su','-',params.hdfs_user,'-c', rebalanceCommand]
+    
+    command = startRebalancingProcess(threshold)
+    
+    basedir = os.path.join(env.config.basedir, 'scripts')
+    if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
+      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
+      command = ['python','hdfs-command.py']
+    
+    print "Executing command %s" % command
+    
+    parser = hdfs_rebalance.HdfsParser()
+    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                          shell=False,
+                          close_fds=True,
+                          cwd=basedir
+                          )
+    for line in iter(proc.stdout.readline, ''):
+      sys.stdout.write('[balancer] %s %s' % (str(datetime.now()), line ))
+      pl = parser.parseLine(line)
+      if pl:
+        res = pl.toJson()
+        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl) 
+        
+        self.put_structured_out(res)
+      elif parser.state == 'PROCESS_FINISHED':
+        sys.stdout.write('[balancer] %s %s\n' % (str(datetime.now()), 'Process is finished'))
+        self.put_structured_out({'completePercent' : 1})
+        break
+      
+      
 if __name__ == "__main__":
   NameNode().execute()
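
A worked example of the completePercent arithmetic above (illustrative, using the numbers from the sample balancer.log):

    # bytesLeftToMove from the first progress line vs. iteration 25,
    # converted the way parseMemory() computes them
    first_left = 5.74 * (1024 ** 3)     # 5.74 GB
    current_left = 30.55 * (1024 ** 2)  # 30.55 MB
    print 1.0 - current_left / first_left  # ~0.9948, reported as completePercent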

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
index 8f6c7e4..9a5e393 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -184,3 +184,4 @@ if not "com.hadoop.compression.lzo" in io_compression_codecs:
   exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
 else:
   exclude_packages = []
+name_node_params = default("/commandParams/namenode", None)
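
For reference, a minimal sketch of how this parameter flows into the rebalance command above; the JSON value mirrors the one used in the tests below:

    import json

    # commandParams/namenode arrives as a JSON string, e.g. from the
    # REBALANCEHDFS request in BackgroundCustomCommandExecutionTest
    name_node_params = '{"threshold":13}'
    threshold = json.loads(name_node_params)['threshold']
    print threshold  # 13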

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
index 17e8724..60ed592 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
@@ -46,7 +46,9 @@ import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.AgentCommand;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
 import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.serveraction.ServerActionManagerImpl;
@@ -742,6 +744,74 @@ public class TestActionScheduler {
     Assert.assertEquals(HostRoleStatus.PENDING, stages.get(3).getHostRoleStatus(hostname3, "DATANODE"));
     Assert.assertEquals(HostRoleStatus.PENDING, stages.get(4).getHostRoleStatus(hostname4, "GANGLIA_MONITOR"));
   }
+  /**
+   * Verifies that ActionScheduler allows background tasks to execute in parallel with other stages
+   */
+  @Test
+  public void testBackgroundStagesExecutionEnable() throws Exception {
+    ActionQueue aq = new ActionQueue();
+    Clusters fsm = mock(Clusters.class);
+    Cluster oneClusterMock = mock(Cluster.class);
+    Service serviceObj = mock(Service.class);
+    ServiceComponent scomp = mock(ServiceComponent.class);
+    ServiceComponentHost sch = mock(ServiceComponentHost.class);
+    UnitOfWork unitOfWork = mock(UnitOfWork.class);
+    RequestFactory requestFactory = mock(RequestFactory.class);
+    when(fsm.getCluster(anyString())).thenReturn(oneClusterMock);
+    when(oneClusterMock.getService(anyString())).thenReturn(serviceObj);
+    when(serviceObj.getServiceComponent(anyString())).thenReturn(scomp);
+    when(scomp.getServiceComponentHost(anyString())).thenReturn(sch);
+    when(serviceObj.getCluster()).thenReturn(oneClusterMock);
+    
+    String hostname1 = "ahost.ambari.apache.org";
+    String hostname2 = "bhost.ambari.apache.org";
+    HashMap<String, ServiceComponentHost> hosts =
+        new HashMap<String, ServiceComponentHost>();
+    hosts.put(hostname1, sch);
+    hosts.put(hostname2, sch);
+    when(scomp.getServiceComponentHosts()).thenReturn(hosts);
+    
+    List<Stage> stages = new ArrayList<Stage>();
+    Stage backgroundStage = null;
+    stages.add(//stage with background command
+        backgroundStage = getStageWithSingleTask(
+            hostname1, "cluster1", Role.NAMENODE, RoleCommand.CUSTOM_COMMAND, "REBALANCEHDFS", Service.Type.HDFS, 1, 1, 1));
+    
+    Assert.assertEquals(AgentCommandType.BACKGROUND_EXECUTION_COMMAND, backgroundStage.getExecutionCommands(hostname1).get(0).getExecutionCommand().getCommandType());
+    
+    stages.add( // Stage with the same hostname, should be scheduled
+        getStageWithSingleTask(
+            hostname1, "cluster1", Role.GANGLIA_MONITOR,
+            RoleCommand.START, Service.Type.GANGLIA, 2, 2, 2));
+    
+    stages.add(
+        getStageWithSingleTask(
+            hostname2, "cluster1", Role.DATANODE,
+            RoleCommand.START, Service.Type.HDFS, 3, 3, 3));
+    
+    
+    ActionDBAccessor db = mock(ActionDBAccessor.class);
+    when(db.getStagesInProgress()).thenReturn(stages);
+    
+    Properties properties = new Properties();
+    properties.put(Configuration.PARALLEL_STAGE_EXECUTION_KEY, "true");
+    Configuration conf = new Configuration(properties);
+    ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3,
+        new HostsMap((String) null), new ServerActionManagerImpl(fsm),
+        unitOfWork, conf);
+    
+    ActionManager am = new ActionManager(
+        2, 2, aq, fsm, db, new HostsMap((String) null),
+        new ServerActionManagerImpl(fsm), unitOfWork,
+        requestFactory, conf);
+    
+    scheduler.doWork();
+    
+    Assert.assertEquals(HostRoleStatus.QUEUED, stages.get(0).getHostRoleStatus(hostname1, "NAMENODE"));
+    Assert.assertEquals(HostRoleStatus.QUEUED, stages.get(2).getHostRoleStatus(hostname2, "DATANODE"));
+
+    Assert.assertEquals(HostRoleStatus.QUEUED, stages.get(1).getHostRoleStatus(hostname1, "GANGLIA_MONITOR"));
+  }
 
 
   @Test
@@ -1234,6 +1304,19 @@ public class TestActionScheduler {
     return stage;
   }
 
+  private Stage getStageWithSingleTask(String hostname, String clusterName, Role role, RoleCommand roleCommand,
+      String customCommandName, Service.Type service, int taskId, int stageId, int requestId) {
+    Stage stage = getStageWithSingleTask(hostname, clusterName, role, roleCommand, service, taskId, stageId, requestId);
+
+    HostRoleCommand cmd = stage.getHostRoleCommand(hostname, role.name());
+    if (cmd != null) {
+      cmd.setCustomCommandName(customCommandName);
+    }
+
+    stage.getExecutionCommandWrapper(hostname, role.toString()).getExecutionCommand().setCommandType(AgentCommandType.BACKGROUND_EXECUTION_COMMAND);
+    return stage;
+  }
+
   private void addInstallTaskToStage(Stage stage, String hostname,
                               String clusterName, Role role,
                               RoleCommand roleCommand, Service.Type service,

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 172a2d6..9f0ccb0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1232,13 +1232,18 @@ public class AmbariMetaInfoTest {
     ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
     Assert.assertEquals("scripts/yet_another_parent_command.py",
             ccd.getCommandScript().getScript());
-
-    Assert.assertEquals(2, component.getCustomCommands().size());
+    
+    ccd = findCustomCommand("REBALANCEHDFS", component);
+    Assert.assertEquals("scripts/namenode.py",
+        ccd.getCommandScript().getScript());
+    Assert.assertTrue(ccd.isBackground());
+    
+    Assert.assertEquals(3, component.getCustomCommands().size());
 
     // Test custom command script inheritance
     component = metaInfo.getComponent(STACK_NAME_HDP, "2.0.8",
             "HDFS", "NAMENODE");
-    Assert.assertEquals(3, component.getCustomCommands().size());
+    Assert.assertEquals(4, component.getCustomCommands().size());
 
     ccd = findCustomCommand("YET_ANOTHER_PARENT_COMMAND", component);
     Assert.assertEquals("scripts/yet_another_parent_command.py",

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 07730dd..43c49de 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -7244,8 +7244,9 @@ public class AmbariManagementControllerTest {
 
     StackServiceComponentResponse response = responses.iterator().next();
     assertNotNull(response.getCustomCommands());
-    assertEquals(1, response.getCustomCommands().size());
+    assertEquals(2, response.getCustomCommands().size());
     assertEquals("DECOMMISSION", response.getCustomCommands().get(0));
+    assertEquals("REBALANCEHDFS", response.getCustomCommands().get(1));
 
     StackServiceComponentRequest journalNodeRequest = new StackServiceComponentRequest(
         STACK_NAME, NEW_STACK_VERSION, SERVICE_NAME, "JOURNALNODE");

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
new file mode 100644
index 0000000..d49cadd
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller;
+
+import static org.mockito.Matchers.any;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.Assert;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.agent.AgentCommand.AgentCommandType;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.internal.ComponentResourceProviderTest;
+import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.state.State;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+
+@RunWith(MockitoJUnitRunner.class)
+public class BackgroundCustomCommandExecutionTest {
+  private Injector injector;
+  private AmbariManagementController controller;
+  private AmbariMetaInfo ambariMetaInfo;
+  private Configuration configuration;
+  private Clusters clusters;
+  
+  
+  private static final String REQUEST_CONTEXT_PROPERTY = "context";
+  
+  @Captor ArgumentCaptor<List<Stage>> stagesCaptor;
+  @Mock ActionManager am;
+  
+  @Before
+  public void setup() throws Exception {
+    InMemoryDefaultTestModule module = new InMemoryDefaultTestModule(){
+      
+      
+      @Override
+      protected void configure() {
+        getProperties().put(Configuration.CUSTOM_ACTION_DEFINITION_KEY, "src/main/resources/custom_action_definitions");
+        super.configure();
+        bind(ActionManager.class).toInstance(am);
+      }
+    };
+    injector = Guice.createInjector(module);
+    
+    
+    injector.getInstance(GuiceJpaInitializer.class);
+    controller = injector.getInstance(AmbariManagementController.class);
+    clusters = injector.getInstance(Clusters.class);
+    configuration = injector.getInstance(Configuration.class);
+    
+    Assert.assertEquals("src/main/resources/custom_action_definitions", configuration.getCustomActionDefinitionPath());
+    
+    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+    ambariMetaInfo.init();
+  }
+  @After
+  public void teardown() {
+    injector.getInstance(PersistService.class).stop();
+  }
+
+  @SuppressWarnings("serial")
+  @Test
+  public void testRebalanceHdfsCustomCommand() {
+    try {
+      createClusterFixture();
+      
+      Map<String, String> requestProperties = new HashMap<String, String>() {
+        {
+          put(REQUEST_CONTEXT_PROPERTY, "Refresh YARN Capacity Scheduler");
+          put("command", "REBALANCEHDFS");
+          put("namenode" , "{\"threshold\":13}");//case is important here
+        }
+      };
+
+      ExecuteActionRequest actionRequest = new ExecuteActionRequest("c1",
+          "REBALANCEHDFS", new HashMap<String, String>());
+      actionRequest.getResourceFilters().add(new RequestResourceFilter("HDFS", "NAMENODE",Collections.singletonList("c6401")));
+      
+      controller.createAction(actionRequest, requestProperties);
+      
+      Mockito.verify(am, Mockito.times(1)).sendActions(stagesCaptor.capture(), any(ExecuteActionRequest.class));
+      
+      
+      List<Stage> stages = stagesCaptor.getValue();
+      Assert.assertEquals(1, stages.size());
+      Stage stage = stages.get(0);
+      
+      System.out.println(stage);
+      
+      Assert.assertEquals(1, stage.getHosts().size());
+      
+      List<ExecutionCommandWrapper> commands = stage.getExecutionCommands("c6401");
+      Assert.assertEquals(1, commands.size());
+      
+      ExecutionCommand command = commands.get(0).getExecutionCommand();
+      
+      Assert.assertEquals(AgentCommandType.BACKGROUND_EXECUTION_COMMAND, command.getCommandType());
+      Assert.assertEquals("{\"threshold\":13}", command.getCommandParams().get("namenode"));
+      
+    } catch (AmbariException e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+  @SuppressWarnings("serial")
+  @Test
+  public void testCancelCommand() {
+    try {
+      createClusterFixture();
+      
+      Map<String, String> requestProperties = new HashMap<String, String>() {
+        {
+          put(REQUEST_CONTEXT_PROPERTY, "Stop background command");
+//          put("cancel_policy","SIGKILL");
+//          put("cancel_task_id","19");
+        }
+      };
+
+      ExecuteActionRequest actionRequest = new ExecuteActionRequest(
+          "c1", 
+          "actionexecute","cancel_background_task",
+          null,
+          null,
+          new HashMap<String, String>(){{
+            put("cancel_policy","SIGKILL"); // parameters/cancel_policy -- in request params
+            put("cancel_task_id","19");
+          }});
+      actionRequest.getResourceFilters().add(new RequestResourceFilter("HDFS", "NAMENODE", Collections.singletonList("c6401")));
+      
+      controller.createAction(actionRequest, requestProperties);
+      
+      Mockito.verify(am, Mockito.times(1)).sendActions(stagesCaptor.capture(), any(ExecuteActionRequest.class));
+      
+      List<Stage> stages = stagesCaptor.getValue();
+      Assert.assertEquals(1, stages.size());
+      Stage stage = stages.get(0);
+      
+      Assert.assertEquals(1, stage.getHosts().size());
+      
+      List<ExecutionCommandWrapper> commands = stage.getExecutionCommands("c6401");
+      Assert.assertEquals(1, commands.size());
+      
+      ExecutionCommand command = commands.get(0).getExecutionCommand();
+      
+      Assert.assertEquals(AgentCommandType.EXECUTION_COMMAND, command.getCommandType());
+      Assert.assertEquals("ACTIONEXECUTE", command.getRoleCommand().name());
+      Assert.assertEquals("cancel_background_task.py", command.getCommandParams().get("script"));
+      Assert.assertEquals("SIGKILL", command.getCommandParams().get("cancel_policy"));
+      Assert.assertEquals("19", command.getCommandParams().get("cancel_task_id"));
+      
+      
+    } catch (AmbariException e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+  
+  private void createClusterFixture() throws AmbariException {
+    createCluster("c1");
+    addHost("c6401","c1");
+    addHost("c6402","c1");
+    
+    clusters.getCluster("c1");
+    createService("c1", "HDFS", null);
+    
+    createServiceComponent("c1","HDFS","NAMENODE", State.INIT);
+    
+    createServiceComponentHost("c1","HDFS","NAMENODE","c6401", null);
+  }
+  private void addHost(String hostname, String clusterName) throws AmbariException {
+    clusters.addHost(hostname);
+    setOsFamily(clusters.getHost(hostname), "redhat", "6.3");
+    clusters.getHost(hostname).setState(HostState.HEALTHY);
+    clusters.getHost(hostname).persist();
+    if (null != clusterName)
+      clusters.mapHostToCluster(hostname, clusterName);
+  }
+  private void setOsFamily(Host host, String osFamily, String osVersion) {
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", osFamily);
+    hostAttributes.put("os_release_version", osVersion);
+    
+    host.setHostAttributes(hostAttributes);
+  }
+
+  private void createCluster(String clusterName) throws AmbariException {
+    ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(), "HDP-2.0.6", null);
+    controller.createCluster(r);
+  }
+  
+  private void createService(String clusterName,
+      String serviceName, State desiredState) throws AmbariException {
+    String dStateStr = null;
+    if (desiredState != null) {
+      dStateStr = desiredState.toString();
+    }
+    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, dStateStr);
+    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
+    requests.add(r1);
+
+    ServiceResourceProviderTest.createServices(controller, requests);
+  }
+
+  private void createServiceComponent(String clusterName,
+      String serviceName, String componentName, State desiredState)
+          throws AmbariException {
+    String dStateStr = null;
+    if (desiredState != null) {
+      dStateStr = desiredState.toString();
+    }
+    ServiceComponentRequest r = new ServiceComponentRequest(clusterName,
+        serviceName, componentName, dStateStr);
+    Set<ServiceComponentRequest> requests =
+        new HashSet<ServiceComponentRequest>();
+    requests.add(r);
+    ComponentResourceProviderTest.createComponents(controller, requests);
+  }
+
+  private void createServiceComponentHost(String clusterName, String serviceName, String componentName, String hostname, State desiredState) throws AmbariException {
+    String dStateStr = null;
+    if (desiredState != null) {
+      dStateStr = desiredState.toString();
+    }
+    ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName,
+        serviceName, componentName, hostname, dStateStr);
+    Set<ServiceComponentHostRequest> requests =
+        new HashSet<ServiceComponentHostRequest>();
+    requests.add(r);
+    controller.createHostComponents(requests);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/java/org/apache/ambari/server/customactions/ActionDefinitionManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/customactions/ActionDefinitionManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/customactions/ActionDefinitionManagerTest.java
index 7108be2..ec84922 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/customactions/ActionDefinitionManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/customactions/ActionDefinitionManagerTest.java
@@ -27,27 +27,15 @@ import org.junit.Test;
 
 public class ActionDefinitionManagerTest {
 
-  private final String customActionDefinitionRoot = "./src/test/resources/custom_action_definitions/".
-          replaceAll("/", File.separator);
+  private final String customActionDefinitionRoot = "./src/test/resources/custom_action_definitions/";
 
   @Test
   public void testReadCustomActionDefinitions() throws Exception {
     ActionDefinitionManager manager = new ActionDefinitionManager();
     manager.readCustomActionDefinitions(new File(customActionDefinitionRoot));
 
-    Assert.assertEquals(3, manager.getAllActionDefinition().size());
-    ActionDefinition ad = manager.getActionDefinition("ambari_hdfs_rebalancer");
-    Assert.assertNotNull(ad);
-    Assert.assertEquals("ambari_hdfs_rebalancer", ad.getActionName());
-    Assert.assertEquals("HDFS Rebalance", ad.getDescription());
-    Assert.assertEquals("threshold,[principal],[keytab]", ad.getInputs());
-    Assert.assertEquals("NAMENODE", ad.getTargetComponent());
-    Assert.assertEquals("HDFS", ad.getTargetService());
-    Assert.assertEquals(600, (int)ad.getDefaultTimeout());
-    Assert.assertEquals(TargetHostType.ANY, ad.getTargetType());
-    Assert.assertEquals(ActionType.SYSTEM, ad.getActionType());
-
-    ad = manager.getActionDefinition("customAction1");
+    Assert.assertEquals(2, manager.getAllActionDefinition().size());
+    ActionDefinition ad = manager.getActionDefinition("customAction1");
     Assert.assertNotNull(ad);
     Assert.assertEquals("customAction1", ad.getActionName());
     Assert.assertEquals("A random test", ad.getDescription());

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 0dfad30..d268f9d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -451,3 +451,10 @@ class TestNamenode(RMFTestCase):
                               recursive = True,
                               mode = 0755,
                               )
+#   def test_rebalance_hdfs(self): ## Does not work: the executeScript framework does not work with structured output
+#     self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+#                        classname = "NameNode",
+#                        command = "rebalancehdfs",
+#                        config_file="rebalancehdfs_default.json"
+#     )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
new file mode 100644
index 0000000..ba11bb5
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
@@ -0,0 +1,388 @@
+{
+    "roleCommand": "CUSTOM_COMMAND", 
+    "clusterName": "pacan", 
+    "hostname": "c6402.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "java_home": "/usr/lib/jvm/java-1.6.0-openjdk.x86_64", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "stack_name": "HDP", 
+        "custom_command": "REBALANCEHDFS", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.1.3.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.1\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"REPLACE_WITH_CENTOS6_URL\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.1.3.0\"},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.17\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6\"}]", 
+        "stack_version": "2.1", 
+        "db_name": "ambari7", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "ambari_db_rca_username": "mapred", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "BACKGROUND_EXECUTION_COMMAND", 
+    "roleParams": {
+        "component_category": "MASTER"
+    }, 
+    "serviceName": "HDFS", 
+    "role": "NAMENODE", 
+    "commandParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "service_package_folder": "HDP/2.0.6/services/HDFS/package", 
+        "script": "scripts/namenode.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "command_timeout": "600", 
+        "namenode": "{\"threshold\":\"DEBUG\"}", 
+        "script_type": "PYTHON"
+    }, 
+    "taskId": 104, 
+    "public_hostname": "c6402.ambari.apache.org", 
+    "configurations": {
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "fs.trash.interval": "360", 
+            "hadoop.security.authentication": "simple", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
+            "hadoop.proxyuser.falcon.hosts": "*", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.security.authorization": "false", 
+            "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020", 
+            "ipc.client.connect.max.retries": "50", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "hadoop.proxyuser.falcon.groups": "users"
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.queues": "default", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1", 
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "*", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.acl_administer_queue": "*", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*"
+        }, 
+        "hadoop-env": {
+            "security_enabled": "false", 
+            "namenode_opt_maxnewsize": "200m", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "ignore_groupsusers_create": "false", 
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "200m", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTime
 Stamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize
 }} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where ha
 doop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add lib
 raries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "hdfs_user": "hdfs", 
+            "user_group": "hadoop", 
+            "dtnode_heapsize": "1024m", 
+            "proxyuser_group": "users", 
+            "smokeuser": "ambari-qa", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
+        }, 
+        "zoo.cfg": {}, 
+        "tez-env": {
+            "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
+            "tez_user": "tez"
+        }, 
+        "tez-site": {
+            "tez.am.shuffle-vertex-manager.max-src-fraction": "0.4", 
+            "tez.task.get-task.sleep.interval-ms.max": "200", 
+            "tez.runtime.intermediate-input.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
+            "tez.yarn.ats.enabled": "true", 
+            "tez.am.log.level": "INFO", 
+            "tez.runtime.intermediate-output.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
+            "tez.runtime.intermediate-output.should-compress": "false", 
+            "tez.lib.uris": "hdfs:///apps/tez/,hdfs:///apps/tez/lib/", 
+            "tez.am.grouping.max-size": "1073741824", 
+            "tez.runtime.intermediate-input.is-compressed": "false", 
+            "tez.am.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC", 
+            "tez.am.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
+            "tez.am.grouping.split-waves": "1.4", 
+            "tez.am.grouping.min-size": "16777216", 
+            "tez.am.container.reuse.enabled": "true", 
+            "tez.session.am.dag.submit.timeout.secs": "300", 
+            "tez.session.client.timeout.secs": "180", 
+            "tez.staging-dir": "/tmp/${user.name}/staging", 
+            "tez.am.am-rm.heartbeat.interval-ms.max": "250", 
+            "tez.am.shuffle-vertex-manager.min-src-fraction": "0.2", 
+            "tez.am.container.reuse.non-local-fallback.enabled": "true", 
+            "tez.am.container.reuse.rack-fallback.enabled": "true", 
+            "tez.am.container.reuse.locality.delay-allocation-millis": "250", 
+            "tez.am.resource.memory.mb": "682", 
+            "tez.am.container.session.delay-allocation-millis": "10000"
+        }, 
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "682", 
+            "mapreduce.map.java.opts": "-Xmx546m", 
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "682", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "682", 
+            "mapreduce.jobhistory.keytab.file": "/etc/security/keytabs/jhs.service.keytab", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.task.io.sort.mb": "273", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:50010", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6402.ambari.apache.org:50470", 
+            "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal", 
+            "dfs.blocksize": "134217728", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.datanode.max.transfer.threads": "1024", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "40", 
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6402.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.http.address": "0.0.0.0:50075", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600"
+        }, 
+        "yarn-env": {
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "apptimelineserver_heapsize": "1024", 
+            "nodemanager_heapsize": "1024", 
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
+            "yarn_heapsize": "1024", 
+            "yarn_user": "yarn", 
+            "resourcemanager_heapsize": "1024", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn"
+        }, 
+        "yarn-log4j": {
+            "content": ""
+        }, 
+        "mapreduce2-log4j": {}, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n
 # Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.
 log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFA
 S=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppe
 nder\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxF
 ileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounte
 r\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
+        }, 
+        "yarn-site": {
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
+            "yarn.scheduler.minimum-allocation-mb": "682", 
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.timeline-service.ttl-ms": "2678400000", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.timeline-service.webapp.address": "c6402.ambari.apache.org:8188", 
+            "yarn.timeline-service.enabled": "true", 
+            "yarn.timeline-service.address": "c6402.ambari.apache.org:10200", 
+            "yarn.nodemanager.log.retain-second": "604800", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.timeline-service.ttl-enable": "true", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.admin.acl": "", 
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
+            "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.acl.enable": "false", 
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore", 
+            "yarn.timeline-service.webapp.https.address": "c6402.ambari.apache.org:8190", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler"
+        }, 
+        "zookeeper-env": {
+            "clientPort": "2181", 
+            "zk_user": "zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "syncLimit": "5", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "tickTime": "2000"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "mapred-env": {
+            "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "mapred_user": "mapred", 
+            "jobhistory_heapsize": "900", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        }
+    }, 
+    "configurationTags": {
+        "capacity-scheduler": {
+            "tag": "version1"
+        }, 
+        "tez-site": {
+            "tag": "version1"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "tez-env": {
+            "tag": "version1"
+        }, 
+        "mapreduce2-log4j": {
+            "tag": "version1"
+        }, 
+        "mapred-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1"
+        }, 
+        "yarn-site": {
+            "tag": "version1"
+        }, 
+        "yarn-env": {
+            "tag": "version1"
+        }, 
+        "yarn-log4j": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "hadoop-env": {
+            "tag": "version1"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "mapred-env": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "11-1", 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "nm_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "app_timeline_server_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670"
+        ], 
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}
\ No newline at end of file

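For orientation, the fixture above is the command JSON an Ambari agent hands to a command script. Below is a minimal sketch of pulling out the rebalance-relevant pieces; the file name and the printout are illustrative only, not part of the commit:

    import json

    # Load a command JSON shaped like the fixture above (file name is hypothetical;
    # "11-1" echoes the commandId in the fixture).
    with open('command-11-1.json') as fp:
        cmd = json.load(fp)

    # The balancer runs against the NameNode listed in clusterHostInfo, and
    # dfs.datanode.balance.bandwidthPerSec caps how many bytes per second each
    # DataNode may spend moving blocks (6250000 here, roughly 6 MB/s).
    namenode_host = cmd['clusterHostInfo']['namenode_host'][0]
    bandwidth = int(cmd['configurations']['hdfs-site']['dfs.datanode.balance.bandwidthPerSec'])
    print('balancer target: %s (per-DataNode cap: %d bytes/sec)' % (namenode_host, bandwidth))
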
http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/resources/custom_action_definitions/cust_action_definitions1.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/custom_action_definitions/cust_action_definitions1.xml b/ambari-server/src/test/resources/custom_action_definitions/cust_action_definitions1.xml
index a4db87d..9cee575 100644
--- a/ambari-server/src/test/resources/custom_action_definitions/cust_action_definitions1.xml
+++ b/ambari-server/src/test/resources/custom_action_definitions/cust_action_definitions1.xml
@@ -42,14 +42,4 @@
     <targetComponent>TASKTRACKER</targetComponent>
     <description>A random test</description>
   </actionDefinition>
-  <actionDefinition>
-    <actionName>ambari_hdfs_rebalancer</actionName>
-    <actionType>SYSTEM</actionType>
-    <inputs>threshold,[principal],[keytab]</inputs>
-    <targetService>HDFS</targetService>
-    <targetComponent>NAMENODE</targetComponent>
-    <description>HDFS Rebalance</description>
-    <targetType>ANY</targetType>
-    <defaultTimeout>600</defaultTimeout>
-  </actionDefinition>
 </actionDefinitions>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/resources/custom_action_definitions/system_action_definitions.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/custom_action_definitions/system_action_definitions.xml b/ambari-server/src/test/resources/custom_action_definitions/system_action_definitions.xml
deleted file mode 100644
index ee07900..0000000
--- a/ambari-server/src/test/resources/custom_action_definitions/system_action_definitions.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="action_definition.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<actionDefinitions>
-  <actionDefinition>
-    <actionName>ambari_hdfs_rebalancer</actionName>
-    <actionType>SYSTEM</actionType>
-    <inputs>threshold,[principal],[keytab]</inputs>
-    <targetService>HDFS</targetService>
-    <targetComponent>NAMENODE</targetComponent>
-    <defaultTimeout>600</defaultTimeout>
-    <description>HDFS Rebalance</description>
-    <targetType>ANY</targetType>
-  </actionDefinition>
-</actionDefinitions>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/metainfo.xml
index be999e6..782aa65 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/metainfo.xml
@@ -42,6 +42,14 @@
                 <timeout>600</timeout>
               </commandScript>
             </customCommand>
+            <customCommand>
+              <name>REBALANCEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
           </customCommands>
         </component>
 

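The REBALANCEHDFS entry above is dispatched by Ambari's script framework to the lowercased method of the same name on the class defined in scripts/namenode.py. A hedged sketch of what such a handler could look like; the body below is an assumption for illustration, not the commit's actual namenode.py (params.hdfs_user and the fallback threshold of 10 are assumed):

    from resource_management import *

    class NameNode(Script):
      # Assumed handler for the REBALANCEHDFS custom command; 'threshold'
      # matches the input declared for the rebalance action definition.
      def rebalancehdfs(self, env):
        import params
        env.set_params(params)
        threshold = default('/commandParams/threshold', 10)
        Execute(format('hdfs balancer -threshold {threshold}'),
                user=params.hdfs_user)

    if __name__ == "__main__":
      NameNode().execute()
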
http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-web/app/controllers/global/background_operations_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/background_operations_controller.js b/ambari-web/app/controllers/global/background_operations_controller.js
index b6dc603..17540c5 100644
--- a/ambari-web/app/controllers/global/background_operations_controller.js
+++ b/ambari-web/app/controllers/global/background_operations_controller.js
@@ -172,6 +172,12 @@ App.BackgroundOperationsController = Em.Controller.extend({
     task.Tasks.status = data.Tasks.status;
     task.Tasks.stdout = data.Tasks.stdout;
     task.Tasks.stderr = data.Tasks.stderr;
+
+    // Copy command identity and structured output from the response onto the task object
+    task.Tasks.command = data.Tasks.command;
+    task.Tasks.custom_command_name = data.Tasks.custom_command_name;
+    task.Tasks.structured_out = data.Tasks.structured_out;
+
     task.Tasks.output_log = data.Tasks.output_log;
     task.Tasks.error_log = data.Tasks.error_log;
     this.set('serviceTimestamp', App.dateTime());

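The structured_out field copied above originates on the agent side: a command script stores a dict of machine-readable progress, which the server then exposes on the task. A hedged sketch, assuming the resource_management Script API's put_structured_out is available to background commands and using an illustrative payload shape:

    from resource_management import Script

    class NameNode(Script):
      def rebalancehdfs(self, env):
        # Whatever dict is stored here surfaces in the web layer as
        # task.Tasks.structured_out; the key below is illustrative only.
        self.put_structured_out({'completePercent': 0.42})
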
http://git-wip-us.apache.org/repos/asf/ambari/blob/cb662f49/ambari-web/app/controllers/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js
index 0aaf78c..32c8f5d 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -230,15 +230,11 @@ App.MainServiceItemController = Em.Controller.extend({
    * @param event
    */
   refreshYarnQueues : function (event) {
-    var self = this;
+    var controller = this;
     return App.showConfirmationPopup(function() {
-      self.refreshYarnQueuesPrimary();
-    });
-  },
-  refreshYarnQueuesPrimary : function(){
     App.ajax.send({
       name : 'service.item.refreshQueueYarnRequest',
-      sender : this,
+      sender : controller,
       data : {
         command : "REFRESHQUEUES",
         context : Em.I18n.t('services.service.actions.run.yarnRefreshQueues.context') ,
@@ -250,6 +246,7 @@ App.MainServiceItemController = Em.Controller.extend({
       success : 'refreshYarnQueuesSuccessCallback',
       error : 'refreshYarnQueuesErrorCallback'
     });
+    });
   },
   refreshYarnQueuesSuccessCallback  : function(data, ajaxOptions, params) {
     if (data.Requests.id) {
@@ -269,6 +266,72 @@ App.MainServiceItemController = Em.Controller.extend({
     App.showAlertPopup(Em.I18n.t('services.service.actions.run.yarnRefreshQueues.error'), error);
     console.warn('Error during refreshYarnQueues:'+error);
   },
+  /**
+   * On click handler for the rebalance HDFS command from the items menu
+   */
+  rebalanceHdfsNodes: function () {
+    var controller = this;
+    App.ModalPopup.show({
+      classNames: ['fourty-percent-width-modal'],
+      header: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.context'),
+      primary: Em.I18n.t('common.start'),
+      secondary: Em.I18n.t('common.cancel'),
+      inputValue: 0,
+      errorMessage: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.promptError'),
+      isInvalid: function () {
+        var intValue = Number(this.get('inputValue'));
+        if (this.get('inputValue') !== 'DEBUG' && (isNaN(intValue) || intValue < 0 || intValue > 100)) {
+          return true;
+        }
+        return false;
+      }.property('inputValue'),
+      onPrimary: function () {
+        if (this.get('isInvalid')) {
+          return;
+        }
+        App.ajax.send({
+          name: 'service.item.rebalanceHdfsNodes',
+          sender: controller,
+          data: {
+            hosts: App.Service.find('HDFS').get('hostComponents').findProperty('componentName', 'NAMENODE').get('hostName'),
+            threshold: this.get('inputValue')
+          },
+          success: 'rebalanceHdfsNodesSuccessCallback',
+          error: 'rebalanceHdfsNodesErrorCallback'
+        });
+        this.hide();
+      },
+      bodyClass: Ember.View.extend({
+        templateName: require('templates/common/prompt_popup'),
+        text: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.prompt'),
+        didInsertElement: function () {
+          App.tooltip(this.$(".prompt-input"), {
+            placement: "bottom",
+            title: Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.promptTooltip')
+          });
+        }
+      })
+    });
+  },
+  rebalanceHdfsNodesSuccessCallback: function (data) {
+    if (data.Requests.id) {
+      App.router.get('backgroundOperationsController').showPopup();
+    } else {
+      console.warn('Error during runRebalanceHdfsNodes');
+    }
+  },
+  rebalanceHdfsNodesErrorCallback: function (data) {
+    var error = Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.error');
+    if (data && data.responseText) {
+      try {
+        var json = $.parseJSON(data.responseText);
+        error += json.message;
+      } catch (err) { // ignore malformed response bodies
+      }
+    }
+    App.showAlertPopup(Em.I18n.t('services.service.actions.run.rebalanceHdfsNodes.error'), error);
+    console.warn('Error during runRebalanceHdfsNodes: ' + error);
+  },
 
   /**
    * On click callback for <code>run compaction</code> button