Posted to commits@helix.apache.org by zz...@apache.org on 2013/05/02 03:26:44 UTC

[1/3] remove test scripts for helix-agent. disable helix-agent

Updated Branches:
  refs/heads/master d5289aebc -> 4ebc0fad9


http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/script/utility.py
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/script/utility.py b/helix-agent/src/main/scripts/integration-test/script/utility.py
deleted file mode 100644
index 0454f00..0000000
--- a/helix-agent/src/main/scripts/integration-test/script/utility.py
+++ /dev/null
@@ -1,813 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-'''
-====  utilities
-'''
-
-import sys, os, subprocess
-import socket, pdb, re
-import urllib, errno
-import time, shutil
-import tempfile
-import random
-import socket
- 
-sys_call_debug=False
-enable_sys_call_debug=False
-debug_enabled=False
-host_name_global = (os.popen("/bin/hostname").read()).split("\n")[0]
-#host_name_global = socket.gethostbyaddr(socket.gethostbyname(socket.gethostname()))[0]
-
-view_root=None
-test_name=None
-#pdb.set_trace()
-this_file_full_path=os.path.abspath(__file__)
-# use logical pwd so symlink can be done from root
-this_file_dirname=os.path.dirname(this_file_full_path)
-this_file_name=os.path.basename(this_file_full_path)
-#this_file_dirname="PWD" in os.environ and os.environ["PWD"] or os.path.dirname(this_file_full_path)
-
-work_dir=None
-log_dir=None
-var_dir=None
-var_dir_template="%s/integration-test/var"
-testcase_dir=None
-testcase_dir_template="%s/integration-test/testcases"
-cwd_dir=os.getcwd()
-import getpass
-username=getpass.getuser()
-# component names used when running commands; multiple components can be combined
-components=[
-     "test_relay"
-    ,"test_bootstrap_producer"
-    ,"bootstrap_server"
-    ,"bootstrap_consumer"
-    ,"profile_relay"
-    ,"profile_consumer"
-]
-
-def dbg_print(in_str):
-    #import pdb
-    #pdb.set_trace()
-    if debug_enabled:
-        print ("== " + sys._getframe(1).f_code.co_name + " == " + str(in_str))
-
-def sys_pipe_call(cmd):
-    dbg_print("%s:%s" % (os.getcwd(),cmd))
-    if sys_call_debug:
-        print("cmd = %s " % cmd)
-        if re.search("svn (log|info)",cmd): return os.popen(cmd).read()
-        return ""
-    return os.popen(cmd).read()               
-
-def get_this_file_dirname(): return this_file_dirname
-def get_this_file_name(): return this_file_name
-#handle the json import 
-if sys.version_info[0]==2 and sys.version_info[1]<6:
-  try:
-    import simplejson as json
-  except ImportError:
-    out=sys_pipe_call(os.path.join(get_this_file_dirname(),"install_python_packages.sh"))
-    #print("install json = %s " % out)
-    import simplejson as json
-else:
-  import json
-
-# functions
-def setup_view_root():
-    global view_root
-    if "VIEW_ROOT" in os.environ: view_root = os.environ["VIEW_ROOT"]
-    else: view_root= os.path.abspath("%s/../../" % this_file_dirname)
-    #print("view_root = %s" % view_root)
-    #print("test_name=%s" % test_name)
-    os.chdir(view_root)
-    os.environ["VIEW_ROOT"]=view_root
-
-def get_view_root(): return view_root
-
-def setup_work_dir():
-    global var_dir, work_dir, log_dir, test_name
-    var_dir= var_dir_template % (view_root)
-    import distutils.dir_util
-    distutils.dir_util.mkpath(var_dir, verbose=1)
-
-    if "TEST_NAME" in os.environ: test_name=os.environ["TEST_NAME"]
-    else: assert False, "TEST NAME Not Defined"
-    if "WORK_SUB_DIR" in os.environ: work_dir=os.path.join(var_dir,os.environ["WORK_SUB_DIR"],test_name)
-    else: assert False, "Work Dir Not Defined"
-    if "LOG_SUB_DIR" in os.environ: log_dir=os.path.join(var_dir, os.environ["LOG_SUB_DIR"], test_name)
-    else: assert False, "Log Dir Not Defined"
-    distutils.dir_util.mkpath(work_dir, verbose=1)  
-    distutils.dir_util.mkpath(log_dir, verbose=1)  
-
-def get_test_name(): return test_name
-def get_work_dir(): return work_dir
-def get_log_dir(): return log_dir
-def get_var_dir(): return var_dir
-def get_script_dir(): return get_this_file_dirname()
-def get_testcase_dir(): return testcase_dir
-def get_cwd(): return cwd_dir
-def get_username(): return username
-
-def my_exit(ret):
-  # close the standard file descriptors
-  os.close(0)  # stdin
-  os.close(1)  # stdout
-  os.close(2)  # stderr
-  sys.exit(ret)
-
-def file_exists(file):  # test absolute path, then relative to view_root and cwd
-    ''' return the abs path of the file if it exists '''
-    if os.path.isabs(file): 
-      if os.path.exists(file): return file
-      else: return None
-    tmp_file=os.path.join(view_root, file)
-    if os.path.exists(tmp_file): return tmp_file
-    tmp_file=os.path.join(cwd_dir,file)
-    if os.path.exists(tmp_file): return tmp_file
-    return None
-  
-def set_debug(flag): 
-    global debug_enabled
-    debug_enabled=flag
-
-def set_sys_call_debug(flag): 
-    global enable_sys_call_debug
-    enable_sys_call_debug=flag
-
-def sys_call_debug_begin():
-    if not enable_sys_call_debug: return
-    global sys_call_debug
-    sys_call_debug=True
-    
-def sys_call_debug_end():
-    if not enable_sys_call_debug: return
-    global sys_call_debug
-    sys_call_debug=False
-
-def sys_call(cmd):
-    dbg_print("%s:%s" % (os.getcwd(),cmd))
-    if sys_call_debug:
-        print("cmd = %s " % cmd)
-        return
-    return os.system(cmd)
-
-def subprocess_call_1(cmd, outfp=None):
-    dbg_print("%s:%s" % (os.getcwd(), cmd))
-    if not sys_call_debug:
-        if outfp:
-          p = subprocess.Popen(cmd, shell=True, stdout=outfp, stderr=outfp, close_fds=True)
-        else:
-          #pdb.set_trace()
-          p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
-        dbg_print("subprocess pid = %s" % p.pid)
-        return p
-    else:
-        print("cmd = %s " % cmd)
-        return None
-
-def sys_pipe_call_4(cmd):
-    dbg_print("%s:%s" % (os.getcwd(), cmd))
-    if not sys_call_debug:
-        p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, close_fds=True)
-        dbg_print("subprocess pid = %s" % p.pid)
-        return p.stdout
-    else:
-        return None
-
-def sys_pipe_call_3(cmd):
-    dbg_print("%s:%s" % (os.getcwd(), cmd))
-    if not sys_call_debug:
-        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, close_fds=True)
-        dbg_print("subprocess pid = %s" % p.pid)
-        #p = os.popen(cmd)
-        return (p.stdout, p.pid)
-    else:
-        return None
-
-def sys_pipe_call_5(cmd):
-    ''' return stdout, stderr and pid '''
-    dbg_print("%s:%s" % (os.getcwd(), cmd))
-    if not sys_call_debug:
-        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
-        dbg_print("subprocess pid = %s" % p.pid)
-        #p = os.popen(cmd)
-        return (p.stdout, p.stderr, p.pid)
-    else:
-        return None
-
-def sys_pipe_call_21(input, cmd):
-    ''' call with input pipe to the cmd '''
-    dbg_print("%s:%s:%s" % (os.getcwd(),input, cmd))
-    if not sys_call_debug:
-      return  subprocess.Popen(cmd.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True).communicate(input)[0]
-    else:
-      return  ""
-
-def sys_pipe_call_2(input, cmd):
-    ''' call with input pipe to the cmd '''
-    dbg_print("%s:%s:%s" % (os.getcwd(),input, cmd))
-    if not sys_call_debug:
-      return  subprocess.Popen(cmd.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True).communicate(input)[0]
-    else:
-      return  ""
-
-def sys_pipe_call_1(cmd):
-    ''' also return the errors '''
-    dbg_print("%s:%s" % (os.getcwd(),cmd))
-    if not sys_call_debug:
-      p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
-      return p.stdout.readlines()
-    else:
-      return  ""
-
-#    dbg_print("%s:%s" % (os.getcwd(),cmd))
-#    return os.popen4(cmd)[1].read()
-
-def sys_call_env(cmd):
-    cmds=cmd.split()
-    dbg_print("cmds= %s " % cmds)
-    os.spawnv( os.P_WAIT, cmds[0], cmds)
-
-def whoami():
-    return sys._getframe(1).f_code.co_name
-
-def my_error(s):
-    if debug_enabled: assert False, "Error: %s" % s
-    else: 
-      print "Error: %s" % s
-      my_exit(1)
-
-def my_warning(s):
-    if debug_enabled:
-        print ("== " + sys._getframe(1).f_code.co_name + " == " + str(s))
-    else: 
-      print "WARNING: %s" % s
-
-def enter_func():
-    dbg_print ("Entering == " + sys._getframe(1).f_code.co_name + " == ")
-
-def get_time():
-    return float("%0.4f" % time.time())   # keep 4 decimal digits
-
-def isOpen(ip,port):
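-    ''' return True if a TCP connect to (ip, port) succeeds, i.e. something is listening there '''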
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    try:
-      s.connect((ip, int(port)))
-      s.shutdown(2)
-      return True
-    except:
-      return False
-
-def next_available_port(ip,port):
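-    # walk upward from the given port and return the first one with no listener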
-    port_num = int(port)
-    while (isOpen(ip, port_num)): port_num +=1
-    return port_num
-
-def find_open_port(host, start_port, seq_num):
-    ''' find the seq_num th port starting from start_port ''' 
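-    # "open" here means a listener accepted the connection; at most 100 ports are scanned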
-    limit = 100
-    start_port_num = int(start_port)
-    seq = 0
-    for i in range(limit):
-      port_num = start_port_num + i
-      if isOpen(host, port_num): seq += 1
-      if seq == seq_num: return port_num
-    return None
-
-def process_exist(pid, host=None):
-    if not host:
-      try:
-	  os.kill(int(pid), 0)
-      except OSError, err:
-	  if err.errno == errno.ESRCH: return False # not running
-	  elif err.errno == errno.EPERM: return True  # own by others by running
-	  else: my_error("Unknown error")
-      else:
-          return True # running
-    else:  # remote run
-      process_cnt = sys_pipe_call("ssh %s@%s 'ps -ef | grep %s | grep -v grep | wc -l'" % (username, host, pid)).split("\n")[0]
-      return int(process_cnt) > 0
-
-# remote execute related, by default remote execution is off
-setup_view_root()
-config_dir="%s/integration-test/config" % view_root
-remote_config_file="%s/remote_execute_on.cfg" % config_dir
-remote_run=False    # this is to indicate use of remote_config
-remote_launch=False  # this is to indicate remote ssh recursive call
-remote_run_config={}
-remote_view_root=None
-def get_remote_view_root(): return remote_view_root
-def set_remote_view_root(v_root): 
-    global remote_view_root
-    remote_view_root = v_root
-def get_remote_log_dir(): 
-    return os.path.join(var_dir_template % remote_view_root, "log")
-def get_remote_work_dir(): 
-    return os.path.join(var_dir_template % remote_view_root, "work")
-
-import ConfigParser
-
-def check_remote_config(remote_config_parser):
-  allowed_options=["host","port","view_root"]
-  section_names = remote_config_parser.sections() # returns a list of strings of section names
-  for section in section_names:
-    if not [x for x in components if re.search(x, section)]:
-      my_error("Invalid section %s in config file " % (section))
-    if [x for x in ["test_relay, profile_realy, bootstrap_server"] if re.search(x, section)]:
-      if not remote_config_parser.has_option(section, "host"):   # set the default host
-        remote_config_parser.set(section, "host",host_name_global) 
-
-def parse_config_cfg(remote_config_file):
-  remote_config_parser = ConfigParser.SafeConfigParser()
-  remote_config_parser.read(remote_config_file)
-  #check_remote_config(remote_config_parser)   # do not check for now
-  for section in remote_config_parser.sections(): # returns a list of strings of section names
-    remote_run_config[section]={}
-    for option in remote_config_parser.options(section):
-      remote_run_config[section][option]=remote_config_parser.get(section,option)
-  return remote_run_config
-
-def parse_config_json(remote_config_file):
-  return json.load(open(remote_config_file))
-
-def parse_config(remote_config_file_input):
-  global remote_run_config, remote_run
-  remote_config_file = file_exists(remote_config_file_input)
-  if not remote_config_file: my_error("remote_config_file %s does not exist!!" % remote_config_file_input)
-  file_type = os.path.splitext(remote_config_file)[1].lower()
-  if file_type not in (".cfg",".json"): my_error("remote_config_file type %s is not .json or .cfg file" % file_type)
-  file_type = file_type.lstrip(".")
-  remote_run_config = globals()["parse_config_%s" % file_type](remote_config_file)
-  remote_run = True
-
-def is_remote_run(): return remote_run
-def is_remote_launch(): return remote_launch
-def set_remote_launch(): 
-  global remote_launch
-  remote_launch=True
-def get_remote_run_config(): return remote_run_config
-
-if "REMOTE_CONFIG_FILE" in os.environ:   # can be set from env or from a file
-  parse_config(os.environ["REMOTE_CONFIG_FILE"])
-  remote_launch = True # env vars are not replicated across hosts, so setting this env var enables remote launch
- 
-# url utilities
-def quote_json(in_str):
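-    # e.g. turns '{count:5,name:foo}' into '{"count":5,"name":foo}' (quotes bare keys after '{' or ',')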
-    ret = re.sub('([{,])(\w+)(:)','\\1"\\2"\\3', in_str)
-    dbg_print("ret = %s" % ret)
-    return ret
-
-def send_url(url_str):
-    dbg_print("url_str = %s" % url_str)
-    usock = urllib.urlopen(url_str)
-    output = usock.read()
-    dbg_print("output = %s" % output)
-    usock.close()
-    return output
-
-# sqlplus
-default_db_port=1521
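-# the %% placeholders below survive this first substitution (which fills in the port) and are
-# filled in later with (user, passwd, host, sid)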
-conn_str_template="%%s/%%s@(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=%%s)(PORT=%s)))(CONNECT_DATA=(SERVICE_NAME=%%s)))" % default_db_port
-sqlplus_cmd="sqlplus"
-#sqlplus_cmd="NLS_LANG=_.UTF8 sqlplus"   # handle utf8
-sqlplus_heading='''
-set echo off
-set pages 50000
-set long 2000000000
-set linesize 5000
-column xml format A5000
-set colsep ,     
-set trimspool on 
-set heading off
-set headsep off  
-set feedback 0
--- set datetime format
-ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MON-DD HH24:MI:SS';
-ALTER SESSION SET NLS_TIMESTAMP_FORMAT='YYYY-MON-DD HH24:MI:SS.FF3';
-'''
-# use milliseconds
-
-def exec_sql_one_row(qry, user, passwd, sid, host):
-    return exec_sql(qry, user, passwd, sid, host, True)[0]
-
-def exec_sql_split_results(result_line):
-    dbg_print("result_line = %s" % result_line)
-    # validate to see if there are errors
-    err_pattern = re.compile("ORA-\d+|SP2-\d+")
-    is_err=err_pattern.search(result_line)
-    if is_err: return [["DBERROR","|".join([r.lstrip() for r in result_line.split("\n") if r != ""])]]
-    else: return [[c.strip() for c in r.split(",")] for r in result_line.split("\n") if r != ""]
-
-def exec_sql(qry, user, passwd, sid, host, do_split=False):
-    ''' returns an list of results '''
-    dbg_print("qry = %s" % (qry)) 
-    sqlplus_input="%s \n %s; \n exit \n" % (sqlplus_heading, qry)
-    #(user, passwd, sid, host) = tuple(area_conn_info[options.area])
-    dbg_print("conn info= %s %s %s %s" % (user, passwd, sid, host))
-    sqlplus_call="%s -S %s" % (sqlplus_cmd, conn_str_template % (user, passwd, host, sid))
-    os.environ["NLS_LANG"]=".UTF8"  # handle utf8
-    ret_str = sys_pipe_call_2(sqlplus_input, sqlplus_call) 
-    dbg_print("ret_str = %s" % ret_str)
-    # may skip this
-    if do_split: return exec_sql_split_results(ret_str)
-    else: return ret_str
-
-def parse_db_conf_file(db_config_file, db_src_ids_str=""):
-    global db_conn_user_id, db_sid, db_host, db_conn_user_passwd, db_src_info, db_src_ids
-    db_src_info={}
-    db_sources=json.load(open(db_config_file))
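-    # the uri is expected to look like [jdbc:oracle:thin:]user/passwd@host:port:sid (or ...@host:port/service_name)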
-    uri = db_sources["uri"]
-    db_conn_user_id = (uri.split("/")[0]).split(":")[-1]
-    db_conn_user_passwd = (uri.split("@")[0]).split("/")[-1]
-    db_host= (uri.split("@")[1]).split(":")[0]
-    tmp = uri.split("@")[1]
-    if tmp.find("/") != -1: db_sid = tmp.split("/")[-1]
-    else: db_sid = tmp.split(":")[-1]
-    dbg_print("db_conn_user_id = %s, db_conn_user_passwd = %s, db_host = %s, db_sid = %s" % (db_conn_user_id, db_conn_user_passwd, db_host, db_sid))
-
-    schema_registry_dir=os.path.join(get_view_root(),"schemas_registry")
-    schema_registry_list=os.listdir(schema_registry_dir)
-    schema_registry_list.sort()
-    sources={}
-    for src in db_sources["sources"]: sources[src["id"]]=src
-    if db_src_ids_str: 
-      if db_src_ids_str=="all": db_src_ids=sources.keys()
-      else: db_src_ids = [int(x) for x in db_src_ids_str.split(",")]
-    else: db_src_ids=[]
-    for src_id in db_src_ids:
-      if src_id not in sources: 
-        my_error("source id %s not in config file %s. Available source ids are %s" % (src_id, db_config_file, sources.keys()))
-      src_info = sources[src_id]
-      src_name = src_info["name"].split(".")[-1]
-      db_avro_file_path = os.path.join(schema_registry_dir,[x for x in schema_registry_list if re.search("%s.*avsc" % src_name,x)][-1])
-      if not os.path.exists(db_avro_file_path): my_error("Schema file %s does not exist" % db_avro_file_path)
-      db_user_id = db_conn_user_id
-      src_uri = src_info["uri"]
-      if src_uri.find(".") != -1: db_user_id = src_uri.split(".")[0]
-      db_src_info[src_id] = {"src_name":src_name,"db_user_id":db_user_id, "db_avro_file_path":db_avro_file_path, "uri":src_uri}
-      dbg_print("db_src_info for src_id %s = %s" % (src_id, db_src_info[src_id]))
-    return db_conn_user_id, db_sid, db_host, db_conn_user_passwd, db_src_info, db_src_ids
-
-''' mysql related stuff '''
-mysql_cmd="mysql"
-def get_mysql_call(dbname, user, passwd, host):
-    conn_str = "%s -s " % mysql_cmd
-    if dbname: conn_str += "-D%s " % dbname
-    if user: conn_str += "-u%s " % user
-    if passwd: conn_str += "-p%s " % passwd
-    if host: conn_str += "-h%s " % host
-    return conn_str
-
-def mysql_exec_sql_one_row(qry, dbname=None, user=None, passwd=None, host=None):
-    ret = mysql_exec_sql(qry, dbname, user, passwd, host, True)
-    dbg_print("ret = %s" % ret)
-    if ret: return ret[0]
-    else: return None
-
-def mysql_exec_sql_split_results(result_line):
-    dbg_print("result_line = %s" % result_line)
-    # validate to see if there are errors
-    err_pattern = re.compile("ERROR \d+")
-    is_err=err_pattern.search(result_line)
-    if is_err: return [["DBERROR","|".join([r.lstrip() for r in result_line.split("\n") if r != ""])]]
-    else: return [[c.strip() for c in r.split("\t")] for r in result_line.split("\n") if r != ""]
-
-def mysql_exec_sql(qry, dbname=None, user=None, passwd=None, host=None, do_split=False):
-    ''' returns an list of results '''
-    dbg_print("qry = %s" % (qry)) 
-    mysql_input=" %s; \n exit \n" % (qry)
-    dbg_print("conn info= %s %s %s %s" % (dbname, user, passwd, host))
-    mysql_call=get_mysql_call(dbname, user, passwd, host)
-    dbg_print("mysql_call= %s" % (mysql_call))
-    #if not re.search("select",qry): return None # test only, select only
-    ret_str = sys_pipe_call_21(mysql_input, mysql_call)   # also returns the error
-    dbg_print("ret_str = %s" % ret_str)
-    # may skip this
-    if do_split: return mysql_exec_sql_split_results(ret_str)
-    else: return ret_str
-
-def get_copy_name(input_file_name):
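-    # e.g. foo.json -> <work_dir>/foo_130501_142233.json (timestamp inserted before the extension)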
-    input_f = os.path.basename(input_file_name)
-    input_f_split =  input_f.split(".")
-    append_idx = max(len(input_f_split)-2,0)
-    input_f_split[append_idx] += time.strftime('_%y%m%d_%H%M%S')
-    new_file= os.path.join(work_dir, ".".join(input_f_split))
-    return new_file
-
-def save_copy(input_files):
-    for i in range(len(input_files)):
-      new_file= get_copy_name(input_files[i])
-      dbg_print("Copy %s to %s" % (input_files[i], new_file))
-      if not remote_run:
-        shutil.copy(input_files[i], new_file)
-      else:
-        remote_run_copy(input_files[i], new_file, i)
-      input_files[i] = new_file
-    return input_files
-
-def save_copy_one(input_file):
-    ''' wrapper for save copy '''
-    input_files=[input_file]
-    save_copy(input_files)
-    return input_files[0]
-
-def db_config_detect_host_normal_open(db_host, db_port, db_user=None, passwd=None, db_sid=None):
-    return isOpen(db_host, db_port)
-
-def db_config_detect_host_oracle_open(db_host, db_port, db_user, passwd, db_sid):
-    ret = exec_sql("exit", db_user, passwd, db_sid, db_host, do_split=False)
-    return not re.search("ERROR:",ret)
-
-def db_config_detect_host(db_host, db_port=default_db_port, detect_oracle=False, db_user=None, passwd=None, db_sid=None):
-    detect_func = detect_oracle and db_config_detect_host_oracle_open or db_config_detect_host_normal_open
-    if detect_func(db_host, db_port, db_user, passwd, db_sid): return (db_host, db_port)  # OK
-    possible_hosts = ["localhost"]        # try local host
-    found_host = False
-    for new_db_host in possible_hosts:
-      if not detect_func(new_db_host, db_port, db_user, passwd, db_sid): continue
-      found_host = True
-      break
-    if not found_host: my_error("db server on %s and possible hosts %s port %s is down" % (db_host, possible_hosts, db_port))
-    print "Substitue the host %s with %s" % (db_host, new_db_host)
-    return (new_db_host, db_port)
-
-def db_config_change(db_relay_config):
-    ''' if there is a config file, handle the case where the db is on the local host '''
-    (db_conn_user_id, db_sid, db_host, db_conn_user_passwd, db_src_info, db_src_ids) = parse_db_conf_file(db_relay_config)
-    (new_db_host, new_db_port) = db_config_detect_host(db_host, detect_oracle=True, db_user=db_conn_user_id, passwd=db_conn_user_passwd, db_sid=db_sid)
-    if new_db_host == db_host: return db_relay_config
-    new_db_config_file = get_copy_name(db_relay_config)
-    print "New config file is %s" % (new_db_config_file)
-    host_port_re = re.compile("@%s:%s:" % (db_host, default_db_port))
-    new_host_port = "@%s:%s:" % (new_db_host, new_db_port)
-    new_db_config_f = open(new_db_config_file, "w")
-    for line in open(db_relay_config):
-      new_db_config_f.write("%s" % host_port_re.sub(new_host_port, line))
-    return new_db_config_file
-
-# get a certain field in url response
-def http_get_field(url_template, host, port, field_name):
-    out = send_url(url_template % (host, port)).split("\n")[1]
-    dbg_print("out = %s" % out)
-    if re.search("Exception:", out): my_error("Exception getting: %s" % out)
-    # work around the invalid json (without the quotes), DDS-379
-    out=quote_json(out)
-    field_value = json.loads(out)
-    return field_value[field_name]
-
-# wait util
-# Classes
-class RetCode:
-  OK=0
-  ERROR=1
-  TIMEOUT=2
-  DIFF=3
-  ZERO_SIZE=4
-
-# wait utility
-def wait_for_condition_1(cond_func, timeout=60, sleep_interval = 0.1):
-  ''' wait until cond_func() returns True, polling every sleep_interval seconds,
-     for up to timeout seconds; returns RetCode.OK or RetCode.TIMEOUT '''
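-  # e.g. wait_for_condition_1(lambda: isOpen("localhost", 8100), timeout=30, sleep_interval=1)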
-  #dbg_print("cond = %s" % cond)
-  if sys_call_debug: return RetCode.OK
-  sleep_cnt = 0
-  ret = RetCode.TIMEOUT
-  while (sleep_cnt * sleep_interval < timeout):
-    dbg_print("attempt %s " % sleep_cnt)
-    if cond_func():
-      dbg_print("success")
-      ret = RetCode.OK
-      break
-    time.sleep(sleep_interval)
-    sleep_cnt += 1
-  return ret
-
-def wait_for_port(host, port):
-  def test_port_not_open():
-    return not isOpen(host, port) 
-  ret = wait_for_condition_1(test_port_not_open, timeout=20, sleep_interval=2)
-  if ret != RetCode.OK:
-    print "ERROR: host:port %s%s is in use" % (host, port)
-  return ret
-
-# find child pid contains java
-# works with linux
-def find_java_pid(this_pid):
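-  # depth-first: return this_pid if its command is java, otherwise recurse into its children from ps --ppid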
-  cmd = sys_pipe_call("ps -o command --pid %s --noheader" % this_pid).split("\n")[0]
-  dbg_print("cmd = %s" % cmd)
-  cmd_split = cmd.split()
-  if len(cmd_split) ==0: return None
-  if re.search("java$", cmd_split[0]): return this_pid
-  child_processes = [x for x in sys_pipe_call("ps -o pid --ppid %s --noheader" % this_pid).split("\n") if x!=""]
-  for child_process in child_processes:
-    dbg_print("child_process = %s" % child_process)
-    child_pid = child_process.split()[0]
-    java_pid = find_java_pid(child_pid)
-    if java_pid: return java_pid
-  return None
-
-# pid info
-PROCESS_INFO_FILE_NAME="process_info.json"
-def set_work_dir(dir):
-  global work_dir
-  work_dir = dir
-
-def get_process_info_file(dir=None):
-  if not dir: dir = get_work_dir()
-  return os.path.join(dir,PROCESS_INFO_FILE_NAME)  # need to do this after init
-
-def validate_process_info_file():
-  process_info_file = get_process_info_file()
-  if os.path.exists(process_info_file): 
-    return process_info_file
-  else: my_error("Process info file %s for test '%s' does not exist. Please run setup first or give correct test name." % (process_info_file, get_test_name()))
-
-def get_process_info(process_info_file=None):
-  if not process_info_file: process_info_file = get_process_info_file()
-  if file_exists(process_info_file): 
-    try: 
-      process_info = json.load(open(process_info_file))
-    except ValueError: 
-      my_error("file %s does not have a valid json. Please remove it." % process_info_file)
-  else: 
-    my_warning("process_info_file %s does not exist" % process_info_file)
-    process_info = {}
-  return process_info
-
-def process_info_get_pid_from_log(log_file):
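-  # scan the first 10 lines for either a bare pid on the first line or a "## java process pid = NNN" marker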
-  log_file_handle = open(log_file)
-  for i in range(10):
-    line = log_file_handle.readline()
-    m = re.search("^([0-9]+)$", line)
-    if i==0 and m:
-      return m.group(1)
-    m = re.search("## java process pid = ([0-9]+)", line)
-    if m:
-      return m.group(1)
-  my_error("cannot find pid in log_file %s" % log_file)
-
-def get_process_info_key(component,id):
-  return "%s:%s" % (component, id)
-
-def split_process_info_key(key):
-  ''' split into component and id '''
-  return tuple(key.split(":"))
-
-def save_process_info(component, id, port, log_file, host=None, admin_port=None, mysql_port=None):    
-  # port can be None
-  process_info = get_process_info()
-  key = get_process_info_key (component, id)
-  process_info[key]={}
-  process_info[key]["host"] = host !=None and host or host_name_global
-  process_info[key]["port"] = port 
-  process_info[key]["view_root"] = get_view_root()
-  if not re.search("^mysql", component):
-   process_info[key]["port_byteman"] = port and int(port) + 1000 or random.randint(16000,17000)
-  process_info[key]["pid"] =  process_info_get_pid_from_log(log_file)
-  if admin_port: process_info[key]["port_admin"] =  admin_port
-  if mysql_port: process_info[key]["port_mysql"] =  mysql_port
-  process_info_file = get_process_info_file()
-  process_info_fs = open(process_info_file, "w")
-  json.dump(process_info, process_info_fs ,sort_keys=True,indent=4)
-  process_info_fs.close()
-
-  if key not in process_info: my_error("key %s is not in process_info %s" % (key, process_info))
-  return process_info
-
-def list_process():
-  process_info_file = validate_process_info_file()
-  print "=== Process info for test '%s': %s ===" % (get_test_name(), process_info_file)
-  print "".join(open(process_info_file).readlines())
-
-def get_down_process():
-  process_info_file = validate_process_info_file()
-  process_info = get_process_info()
-  down_process={}
-  for key in process_info:
-    pid = process_info[key]["pid"]
-    dbg_print("checking (%s:%s)" % (key,pid))
-    if not process_exist(pid,remote_run and process_info[key]["host"] or None):
-      down_process[key] = pid
-  return down_process
-
-def check_process_up():
-  down_process = get_down_process()
-  if down_process:
-    for key in down_process:
-      print  "(%s:%s) is down" % (key, down_process[key])
-    return False
-  else:
-    return True  # all the processes are up
-
-# == remote run
-def get_remote_host(component, id="1", field="host"):
-  if not is_remote_run(): return "localhost"
-  key = get_process_info_key(component,id)
-  if key not in get_remote_run_config():
-    my_error("Cannot find remote host for %s in remote_run_config" % (key))
-  return get_remote_run_config()[key][field]
-
-def get_remote_view_root(component, id="1"):  # note: shadows the zero-arg get_remote_view_root() above
-  return get_remote_host(component,id,"view_root")
-
-def need_remote_run(process_info):
-  for k, v in process_info.items():
-    if not re.search("^mysql",k):  # fiter out mysql
-      if v["host"].split(".")[0] != host_name_global.split(".")[0]:
-         return True   # need remote run 
-  return False
-
-metabuilder_file=".metabuilder.properties"
-def get_bldfwk_dir():
-    bldfwk_file = "%s/%s" % (get_view_root(), metabuilder_file)
-    bldfwk_dir = None
-    if not os.path.exists(bldfwk_file): return None
-    for line in open(bldfwk_file):
-      m = re.search("(bldshared-[0-9]+)",line)
-      if m: 
-        bldfwk_dir= m.group(1)
-        break
-    print "Warning. Cannot find bldshared-dir, run ant -f bootstrap.xml"
-    #assert bldfwk_dir, "Cannot find bldshared-dir, run ant -f bootstrap.xml"
-    return bldfwk_dir
-
-rsync_path="/usr//bin/rsync"
-remote_deploy_cmd_template='''rsync -avz  --exclude=.svn --exclude=var --exclude=test-output --exclude=lucene-indexes --exclude=mmap --exclude=mmappedBuffer --exclude=eventLog --exclude=cp_com_linkedin_events --exclude=dist --rsync-path=%s %s/ %s:%s'''
-remote_deploy_bldcmd_template='''rsync -avz  --exclude=.svn --rsync-path=%s %s %s:%s'''
-remote_deploy_change_blddir_cmd_template='''ssh %s "sed 's/%s/%s/' %s > %s_tmp; mv -f %s_tmp %s" '''
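-# the sed template above rewrites the local view_root path inside the copied metabuilder file to point at the remote view_root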
-def do_remote_deploy(reset=False):
-    global rsync_path
-    if not remote_run:  # check
-      my_error("Remote config file is not set. use --remote_config_file or set REMOTE_CONFIG_FILE!")
-    bldfwk_dir = get_bldfwk_dir()
-    view_root = get_view_root()
-    already_copied={}
-    for section in remote_run_config:
-      remote_host = remote_run_config[section]["host"]
-      remote_view_root = remote_run_config[section]["view_root"]
-      key = "%s:%s" % (remote_host, remote_view_root)
-      if key in already_copied: 
-        print "Already copied. Skip: host: %s, view_root: %s" % (remote_host, remote_view_root)
-        continue
-      else: already_copied[key]=1
-      if "rsync_path" in remote_run_config[section]: rsync_path=remote_run_config[section]["rsync_path"]
-      remote_view_root_parent = os.path.dirname(remote_view_root)
-      if reset: sys_call("ssh %s rm -rf %s" % (remote_host, remote_view_root))
-      sys_call("ssh %s mkdir -p %s" % (remote_host, remote_view_root))
-      cmd = remote_deploy_cmd_template % (rsync_path, view_root, remote_host, remote_view_root)
-      sys_call(cmd) 
-      if bldfwk_dir:
-        cmd = remote_deploy_bldcmd_template % (rsync_path, os.path.join(os.path.dirname(view_root),bldfwk_dir), remote_host, remote_view_root_parent)
-        sys_call(cmd) 
-        # replace the local view_root inside the metabuilder (slashes escaped for sed below)
-        metabuilder_full_path = os.path.join(remote_view_root, metabuilder_file)
-        cmd = remote_deploy_change_blddir_cmd_template % (remote_host, view_root.replace("/","\/"), remote_view_root.replace("/","\/"), metabuilder_full_path,  metabuilder_full_path,  metabuilder_full_path,  metabuilder_full_path)
-        sys_call(cmd) 
-      # copy gradle cache
-      gradle_cache_template = "%s/.gradle/cache"
-      gradle_cache_dir = gradle_cache_template % os.environ["HOME"] 
-      if remote_host.split(".")[0] != host_name_global.split(".")[0] and os.path.exists(gradle_cache_dir):
-        ret = sys_pipe_call("ssh %s pwd" % (remote_host))
-        remote_home = ret.split("\n")[0] 
-        ret = sys_call("ssh %s mkdir -p %s " % (remote_host, (gradle_cache_template % remote_home)))
-        cmd = "rsync -avz --rsync-path=%s %s/ %s:%s" % (rsync_path, gradle_cache_dir , remote_host, gradle_cache_template % remote_home)
-        sys_call(cmd)
-    return RetCode.OK
-
-def get_remote_host_viewroot_path():
-  ''' returns (host, view_root, rsync_path) for each unique (host, view_root) pair '''
-  host_viewroot_dict = {}
-  for component in remote_run_config:
-    remote_host = remote_run_config[component]["host"]
-    remote_view_root = remote_run_config[component]["view_root"]
-    combined_key = "%s,%s" % (remote_host, remote_view_root)
-    if remote_host.split(".")[0] != host_name_global.split(".")[0] and not combined_key in host_viewroot_dict:
-      host_viewroot_dict[combined_key] =  "rsync_path" in remote_run_config[component] and remote_run_config[component]["rsync_path"] or rsync_path
-  keys = host_viewroot_dict.keys()
-  ret = []
-  for k in keys: 
-    l = k.split(",")
-    l.append(host_viewroot_dict[k])
-    ret.append(tuple(l))
-  return ret
-
-#====End of Utilities============
-

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/script/utility.pyc
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/script/utility.pyc b/helix-agent/src/main/scripts/integration-test/script/utility.pyc
deleted file mode 100644
index e89aeb8..0000000
Binary files a/helix-agent/src/main/scripts/integration-test/script/utility.pyc and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/setup_env.inc
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/setup_env.inc b/helix-agent/src/main/scripts/integration-test/setup_env.inc
deleted file mode 100644
index 95d0f02..0000000
--- a/helix-agent/src/main/scripts/integration-test/setup_env.inc
+++ /dev/null
@@ -1,37 +0,0 @@
-# set up env, figure out the path
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-ROOT_DIR=../../../
-PATH_PREFIX_DIR=../../../integration-test
-SCRIPT_DIR=$PATH_PREFIX_DIR/script/
-CONFIG_DIR=integration-test/config
-VAR_DIR=$PATH_PREFIX_DIR/var
-LOG_DIR=$VAR_DIR/log
-WORK_DIR=$VAR_DIR/work
-WORK_DIR_FROM_ROOT=integration-test/var/work
-LOG_DIR_FROM_ROOT=integration-test/var/log
-DATA_DIR=$PATH_PREFIX_DIR/data
-# solaris tail
-TAIL_PATH=/usr/xpg4/bin/tail     
-if [ ! -f $TAIL_PATH ]; then
-  TAIL_PATH=tail
-fi
-# over all stats
-all_stat=0

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/testcases/foo_test.py
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/testcases/foo_test.py b/helix-agent/src/main/scripts/integration-test/testcases/foo_test.py
deleted file mode 100755
index d9c24aa..0000000
--- a/helix-agent/src/main/scripts/integration-test/testcases/foo_test.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/python
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file 
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file 
-# to you under the Apache License, Version 2.0 (the 
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import os, time, sys
-
-this_file_full_path=os.path.abspath(__file__)
-this_file_dirname=os.path.dirname(this_file_full_path)
-
-# write pid for monitor
-pid=os.getpid()
-
-pid_file=open(os.path.join(this_file_dirname,"../var/log/default/foo_TestDB0_0_pid.txt"), "wb")
-pid_file.write("%s\n" % pid)
-pid_file.close()
-
-# this output tells dds_driver.py to return
-print "start"
-sys.stdout.flush()
-
-print "byebye"
-

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/testcases/report_pass_fail.inc
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/testcases/report_pass_fail.inc b/helix-agent/src/main/scripts/integration-test/testcases/report_pass_fail.inc
deleted file mode 100644
index e396f58..0000000
--- a/helix-agent/src/main/scripts/integration-test/testcases/report_pass_fail.inc
+++ /dev/null
@@ -1,40 +0,0 @@
-# Report pass or fail
-#   Input:
-#      stat_txt, what to display to report 
-#   Output: $?
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-stat=$?
-num_steps=$(($num_steps+1))
-if [ $stat == 0 ]; then
-  echo $stat_txt SUCCESS
-else
-  echo $stat_txt FAIL
-  all_stat=$(($all_stat+1))
-fi
-if [ $final_report ]; then
-   if [ $all_stat -gt 0 ]; then
-     echo test FAIL. There are $all_stat failing steps.
-   else
-     if [ $num_steps -gt 1 ]; then
-       echo ALL $num_steps steps for test SUCCESS.
-     fi
-   fi
-fi
-

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/testcases/setup_env.inc
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/testcases/setup_env.inc b/helix-agent/src/main/scripts/integration-test/testcases/setup_env.inc
deleted file mode 100644
index bb2e104..0000000
--- a/helix-agent/src/main/scripts/integration-test/testcases/setup_env.inc
+++ /dev/null
@@ -1,60 +0,0 @@
-# set up env, figure out the path
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#ROOT_DIR=../../../
-export PATH_PREFIX_DIR=../
-export SCRIPT_DIR=$PATH_PREFIX_DIR/script/
-export VAR_DIR=$PATH_PREFIX_DIR/var
-export DATA_DIR=$PATH_PREFIX_DIR/data
-
-export SCRIPT_DIR=../script
-export CONFIG_DIR=integration-test/config
-#export VAR_DIR=../var
-
-DEFAULT_TEST_NAME=`basename $0`
-
-if [ -z "${TEST_NAME}" -o "${TEST_NAME}" = "-bash" ] ; then
-  TEST_NAME=${DEFAULT_TEST_NAME}
-fi
-
-export TEST_NAME
-export VAR_WORK_DIR=../var/work
-export VAR_WORK_DIR_FROM_ROOT=integration-test/var/work
-
-if [ -z "${TEST_NAME}" ] ; then
-  export LOG_DIR=../var/log
-  export WORK_DIR=../var/work
-  export WORK_DIR_FROM_ROOT=integration-test/var/work
-  export LOG_DIR_FROM_ROOT=integration-test/var/log
-else
-  export LOG_DIR=../var/log/${TEST_NAME}
-  export WORK_DIR=../var/work/${TEST_NAME}
-  export WORK_DIR_FROM_ROOT=integration-test/var/work/${TEST_NAME}
-  export LOG_DIR_FROM_ROOT=integration-test/var/log/${TEST_NAME}
-fi
-#export DATA_DIR=../data
-export VIEW_ROOT=`cd ../../; echo $PWD`
-# solaris tail
-export TAIL_PATH=/usr/xpg4/bin/tail
-if [ ! -f $TAIL_PATH ]; then
-  export TAIL_PATH=tail
-fi
-# over all stats
-all_stat=0

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/test/java/org/apache/helix/agent/TestHelixAgent.java
----------------------------------------------------------------------
diff --git a/helix-agent/src/test/java/org/apache/helix/agent/TestHelixAgent.java b/helix-agent/src/test/java/org/apache/helix/agent/TestHelixAgent.java
index 6f5ad6c..daad645 100644
--- a/helix-agent/src/test/java/org/apache/helix/agent/TestHelixAgent.java
+++ b/helix-agent/src/test/java/org/apache/helix/agent/TestHelixAgent.java
@@ -40,7 +40,8 @@ import org.testng.annotations.Test;
 
 public class TestHelixAgent extends ZkUnitTestBase {
   
-  @Test
+  // disable this test
+  // @Test
   public void test() throws Exception {
     String className = TestHelper.getTestClassName();
     String methodName = TestHelper.getTestMethodName();


[2/3] remove test scripts for helix-agent. disable helix-agent

Posted by zz...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/script/pexpect.py
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/script/pexpect.py b/helix-agent/src/main/scripts/integration-test/script/pexpect.py
deleted file mode 100644
index 6516cda..0000000
--- a/helix-agent/src/main/scripts/integration-test/script/pexpect.py
+++ /dev/null
@@ -1,1864 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-"""Pexpect is a Python module for spawning child applications and controlling
-them automatically. Pexpect can be used for automating interactive applications
-such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
-scripts for duplicating software package installations on different servers. It
-can be used for automated software testing. Pexpect is in the spirit of Don
-Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
-require TCL and Expect or require C extensions to be compiled. Pexpect does not
-use C, Expect, or TCL extensions. It should work on any platform that supports
-the standard Python pty module. The Pexpect interface focuses on ease of use so
-that simple tasks are easy.
-
-There are two main interfaces to Pexpect -- the function, run() and the class,
-spawn. You can call the run() function to execute a command and return the
-output. This is a handy replacement for os.system().
-
-For example::
-
-    pexpect.run('ls -la')
-
-The more powerful interface is the spawn class. You can use this to spawn an
-external child command and then interact with the child by sending lines and
-expecting responses.
-
-For example::
-
-    child = pexpect.spawn('scp foo myname@host.example.com:.')
-    child.expect ('Password:')
-    child.sendline (mypassword)
-
-This works even for commands that ask for passwords or other input outside of
-the normal stdio streams.
-
-Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
-Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
-vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
-Geoffrey Marshall, Francisco Lourenco, Glen Mabey, Karthik Gurusamy, Fernando
-Perez, Corey Minyard, Jon Cohen, Guillaume Chazarain, Andrew Ryan, Nick
-Craig-Wood, Andrew Stone, Jorgen Grahn (Let me know if I forgot anyone.)
-
-Free, open source, and all that good stuff.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-Pexpect Copyright (c) 2008 Noah Spurrier
-http://pexpect.sourceforge.net/
-
-$Id: pexpect.py 507 2007-12-27 02:40:52Z noah $
-"""
-
-try:
-    import os, sys, time
-    import select
-    import string
-    import re
-    import struct
-    import resource
-    import types
-    import pty
-    import tty
-    import termios
-    import fcntl
-    import errno
-    import traceback
-    import signal
-except ImportError, e:
-    raise ImportError (str(e) + """
-
-A critical module was not found. Probably this operating system does not
-support it. Pexpect is intended for UNIX-like operating systems.""")
-
-__version__ = '2.3'
-__revision__ = '$Revision: 399 $'
-__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
-    'split_command_line', '__version__', '__revision__']
-
-# Exception classes used by this module.
-class ExceptionPexpect(Exception):
-
-    """Base class for all exceptions raised by this module.
-    """
-
-    def __init__(self, value):
-
-        self.value = value
-
-    def __str__(self):
-
-        return str(self.value)
-
-    def get_trace(self):
-
-        """This returns an abbreviated stack trace with lines that only concern
-        the caller. In other words, the stack trace inside the Pexpect module
-        is not included. """
-
-        tblist = traceback.extract_tb(sys.exc_info()[2])
-        #tblist = filter(self.__filter_not_pexpect, tblist)
-        tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
-        tblist = traceback.format_list(tblist)
-        return ''.join(tblist)
-
-    def __filter_not_pexpect(self, trace_list_item):
-
-        """This returns True if list item 0 the string 'pexpect.py' in it. """
-
-        if trace_list_item[0].find('pexpect.py') == -1:
-            return True
-        else:
-            return False
-
-class EOF(ExceptionPexpect):
-
-    """Raised when EOF is read from a child. This usually means the child has exited."""
-
-class TIMEOUT(ExceptionPexpect):
-
-    """Raised when a read time exceeds the timeout. """
-
-##class TIMEOUT_PATTERN(TIMEOUT):
-##    """Raised when the pattern match time exceeds the timeout.
-##    This is different than a read TIMEOUT because the child process may
-##    give output, thus never give a TIMEOUT, but the output
-##    may never match a pattern.
-##    """
-##class MAXBUFFER(ExceptionPexpect):
-##    """Raised when a scan buffer fills before matching an expected pattern."""
-
-def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None, logfile=None, cwd=None, env=None):
-
-    """
-    This function runs the given command; waits for it to finish; then
-    returns all output as a string. STDERR is included in output. If the full
-    path to the command is not given then the path is searched.
-
-    Note that lines are terminated by CR/LF (\\r\\n) combination even on
-    UNIX-like systems because this is the standard for pseudo ttys. If you set
-    'withexitstatus' to true, then run will return a tuple of (command_output,
-    exitstatus). If 'withexitstatus' is false then this returns just
-    command_output.
-
-    The run() function can often be used instead of creating a spawn instance.
-    For example, the following code uses spawn::
-
-        from pexpect import *
-        child = spawn('scp foo myname@host.example.com:.')
-        child.expect ('(?i)password')
-        child.sendline (mypassword)
-
-    The previous code can be replaced with the following::
-
-        from pexpect import *
-        run ('scp foo myname@host.example.com:.', events={'(?i)password': mypassword})
-
-    Examples
-    ========
-
-    Start the apache daemon on the local machine::
-
-        from pexpect import *
-        run ("/usr/local/apache/bin/apachectl start")
-
-    Check in a file using SVN::
-
-        from pexpect import *
-        run ("svn ci -m 'automatic commit' my_file.py")
-
-    Run a command and capture exit status::
-
-        from pexpect import *
-        (command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
-
-    Tricky Examples
-    ===============
-
-    The following will run SSH and execute 'ls -l' on the remote machine. The
-    password 'secret' will be sent if the '(?i)password' pattern is ever seen::
-
-        run ("ssh username@machine.example.com 'ls -l'", events={'(?i)password':'secret\\n'})
-
-    This will start mencoder to rip a video from DVD. This will also display
-    progress ticks every 5 seconds as it runs. For example::
-
-        from pexpect import *
-        def print_ticks(d):
-            print d['event_count'],
-        run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
-
-    The 'events' argument should be a dictionary of patterns and responses.
-    Whenever one of the patterns is seen in the command output, run() will send the
-    associated response string. Note that you should put newlines in your
-    string if Enter is necessary. The responses may also contain callback
-    functions. Any callback is a function that takes a dictionary as an argument.
-    The dictionary contains all the locals from the run() function, so you can
-    access the child spawn object or any other variable defined in run()
-    (event_count, child, and extra_args are the most useful). A callback may
-    return True to stop the current run process otherwise run() continues until
-    the next event. A callback may also return a string which will be sent to
-    the child. 'extra_args' is not used directly by run(). It provides a way to
-    pass data to a callback function through run() through the locals
-    dictionary passed to a callback. """
-
-    if timeout == -1:
-        child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
-    else:
-        child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile, cwd=cwd, env=env)
-    if events is not None:
-        patterns = events.keys()
-        responses = events.values()
-    else:
-        patterns=None # We assume that EOF or TIMEOUT will save us.
-        responses=None
-    child_result_list = []
-    event_count = 0
-    while 1:
-        try:
-            index = child.expect (patterns)
-            if type(child.after) in types.StringTypes:
-                child_result_list.append(child.before + child.after)
-            else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
-                child_result_list.append(child.before)
-            if type(responses[index]) in types.StringTypes:
-                child.send(responses[index])
-            elif type(responses[index]) is types.FunctionType:
-                callback_result = responses[index](locals())
-                sys.stdout.flush()
-                if type(callback_result) in types.StringTypes:
-                    child.send(callback_result)
-                elif callback_result:
-                    break
-            else:
-                raise TypeError ('The callback must be a string or function type.')
-            event_count = event_count + 1
-        except TIMEOUT, e:
-            child_result_list.append(child.before)
-            break
-        except EOF, e:
-            child_result_list.append(child.before)
-            break
-    child_result = ''.join(child_result_list)
-    if withexitstatus:
-        child.close()
-        return (child_result, child.exitstatus)
-    else:
-        return child_result
-
-class spawn (object):
-
-    """This is the main class interface for Pexpect. Use this class to start
-    and control child applications. """
-
-    def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
-
-        """This is the constructor. The command parameter may be a string that
-        includes a command and any arguments to the command. For example::
-
-            child = pexpect.spawn ('/usr/bin/ftp')
-            child = pexpect.spawn ('/usr/bin/ssh user@example.com')
-            child = pexpect.spawn ('ls -latr /tmp')
-
-        You may also construct it with a list of arguments like so::
-
-            child = pexpect.spawn ('/usr/bin/ftp', [])
-            child = pexpect.spawn ('/usr/bin/ssh', ['user@example.com'])
-            child = pexpect.spawn ('ls', ['-latr', '/tmp'])
-
-        After this the child application will be created and will be ready to
-        talk to. For normal use, see expect() and send() and sendline().
-
-        Remember that Pexpect does NOT interpret shell meta characters such as
-        redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
-        If you want to run a command and pipe it through another command then
-        you must also start a shell. For example::
-
-            child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
-            child.expect(pexpect.EOF)
-
-        The second form of spawn (where you pass a list of arguments) is useful
-        in situations where you wish to spawn a command and pass it its own
-        argument list. This can make syntax more clear. For example, the
-        following is equivalent to the previous example::
-
-            shell_cmd = 'ls -l | grep LOG > log_list.txt'
-            child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
-            child.expect(pexpect.EOF)
-
-        The maxread attribute sets the read buffer size. This is maximum number
-        of bytes that Pexpect will try to read from a TTY at one time. Setting
-        the maxread size to 1 will turn off buffering. Setting the maxread
-        value higher may help performance in cases where large amounts of
-        output are read back from the child. This feature is useful in
-        conjunction with searchwindowsize.
-
-        The searchwindowsize attribute sets how far back in the incoming
-        search buffer Pexpect will search for pattern matches. Every time
-        Pexpect reads some data from the child it will append the data to the
-        incoming buffer. The default is to search from the beginning of the
-        incoming buffer each time new data is read from the child. But this is
-        very inefficient if you are running a command that generates a large
-        amount of data where you want to match. The searchwindowsize does not
-        affect the size of the incoming data buffer. You will still have
-        access to the full buffer after expect() returns.
-
-        The logfile member turns on or off logging. All input and output will
-        be copied to the given file object. Set logfile to None to stop
-        logging. This is the default. Set logfile to sys.stdout to echo
-        everything to standard output. The logfile is flushed after each write.
-
-        Example log input and output to a file::
-
-            child = pexpect.spawn('some_command')
-            fout = file('mylog.txt','w')
-            child.logfile = fout
-
-        Example log to stdout::
-
-            child = pexpect.spawn('some_command')
-            child.logfile = sys.stdout
-
-        The logfile_read and logfile_send members can be used to separately log
-        the input from the child and output sent to the child. Sometimes you
-        don't want to see everything you write to the child. You only want to
-        log what the child sends back. For example::
-        
-            child = pexpect.spawn('some_command')
-            child.logfile_read = sys.stdout
-
-        To separately log output sent to the child use logfile_send::
-        
-            child.logfile_send = fout
-
-        The delaybeforesend helps overcome a weird behavior that many users
-        were experiencing. The typical problem was that a user would expect() a
-        "Password:" prompt and then immediately call sendline() to send the
-        password. The user would then see that their password was echoed back
-        to them. Passwords don't normally echo. The problem is caused by the
-        fact that most applications print out the "Password" prompt and then
-        turn off stdin echo, but if you send your password before the
-        application turned off echo, then you get your password echoed.
-        Normally this wouldn't be a problem when interacting with a human at a
-        real keyboard. If you introduce a slight delay just before writing then
-        this seems to clear up the problem. This was such a common problem for
-        many users that I decided that the default pexpect behavior should be
-        to sleep just before writing to the child application. 1/20th of a
-        second (50 ms) seems to be enough to clear up the problem. You can set
-        delaybeforesend to 0 to return to the old behavior. Most Linux machines
-        don't like this to be below 0.03. I don't know why.
-
-        Note that spawn is clever about finding commands on your path.
-        It uses the same logic that "which" uses to find executables.
-
-        If you wish to get the exit status of the child you must call the
-        close() method. The exit or signal status of the child will be stored
-        in self.exitstatus or self.signalstatus. If the child exited normally
-        then exitstatus will store the exit return code and signalstatus will
-        be None. If the child was terminated abnormally with a signal then
-        signalstatus will store the signal value and exitstatus will be None.
-        If you need more detail you can also read the self.status member which
-        stores the status returned by os.waitpid. You can interpret this using
-        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. """
-
-        self.STDIN_FILENO = pty.STDIN_FILENO
-        self.STDOUT_FILENO = pty.STDOUT_FILENO
-        self.STDERR_FILENO = pty.STDERR_FILENO
-        self.stdin = sys.stdin
-        self.stdout = sys.stdout
-        self.stderr = sys.stderr
-
-        self.searcher = None
-        self.ignorecase = False
-        self.before = None
-        self.after = None
-        self.match = None
-        self.match_index = None
-        self.terminated = True
-        self.exitstatus = None
-        self.signalstatus = None
-        self.status = None # status returned by os.waitpid
-        self.flag_eof = False
-        self.pid = None
-        self.child_fd = -1 # initially closed
-        self.timeout = timeout
-        self.delimiter = EOF
-        self.logfile = logfile
-        self.logfile_read = None # input from child (read_nonblocking)
-        self.logfile_send = None # output to send (send, sendline)
-        self.maxread = maxread # max bytes to read at one time into buffer
-        self.buffer = '' # This is the read buffer. See maxread.
-        self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
-        # Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
-        self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
-        self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
-        self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
-        self.softspace = False # File-like object.
-        self.name = '<' + repr(self) + '>' # File-like object.
-        self.encoding = None # File-like object.
-        self.closed = True # File-like object.
-        self.cwd = cwd
-        self.env = env
-        self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
-        # Solaris uses internal __fork_pty(). All others use pty.fork().
-        if (sys.platform.lower().find('solaris')>=0) or (sys.platform.lower().find('sunos5')>=0):
-            self.use_native_pty_fork = False
-        else:
-            self.use_native_pty_fork = True
-
-
-        # allow dummy instances for subclasses that may not use command or args.
-        if command is None:
-            self.command = None
-            self.args = None
-            self.name = '<pexpect factory incomplete>'
-        else:
-            self._spawn (command, args)
-
-    def __del__(self):
-
-        """This makes sure that no system resources are left open. Python only
-        garbage collects Python objects. OS file descriptors are not Python
-        objects, so they must be handled explicitly. If the child file
-        descriptor was opened outside of this class (passed to the constructor)
-        then this does not close it. """
-
-        if not self.closed:
-            # It is possible for __del__ methods to execute during the
-            # teardown of the Python VM itself. Thus self.close() may
-            # trigger an exception because os.close may be None.
-            # -- Fernando Perez
-            try:
-                self.close()
-            except AttributeError:
-                pass
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object. """
-
-        s = []
-        s.append(repr(self))
-        s.append('version: ' + __version__ + ' (' + __revision__ + ')')
-        s.append('command: ' + str(self.command))
-        s.append('args: ' + str(self.args))
-        s.append('searcher: ' + str(self.searcher))
-        s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
-        s.append('before (last 100 chars): ' + str(self.before)[-100:])
-        s.append('after: ' + str(self.after))
-        s.append('match: ' + str(self.match))
-        s.append('match_index: ' + str(self.match_index))
-        s.append('exitstatus: ' + str(self.exitstatus))
-        s.append('flag_eof: ' + str(self.flag_eof))
-        s.append('pid: ' + str(self.pid))
-        s.append('child_fd: ' + str(self.child_fd))
-        s.append('closed: ' + str(self.closed))
-        s.append('timeout: ' + str(self.timeout))
-        s.append('delimiter: ' + str(self.delimiter))
-        s.append('logfile: ' + str(self.logfile))
-        s.append('logfile_read: ' + str(self.logfile_read))
-        s.append('logfile_send: ' + str(self.logfile_send))
-        s.append('maxread: ' + str(self.maxread))
-        s.append('ignorecase: ' + str(self.ignorecase))
-        s.append('searchwindowsize: ' + str(self.searchwindowsize))
-        s.append('delaybeforesend: ' + str(self.delaybeforesend))
-        s.append('delayafterclose: ' + str(self.delayafterclose))
-        s.append('delayafterterminate: ' + str(self.delayafterterminate))
-        return '\n'.join(s)
-
-    def _spawn(self,command,args=[]):
-
-        """This starts the given command in a child process. This does all the
-        fork/exec type of stuff for a pty. This is called by __init__. If args
-        is empty then command will be parsed (split on spaces) and args will be
-        set to parsed arguments. """
-
-        # The pid and child_fd of this object get set by this method.
-        # Note that it is difficult for this method to fail.
-        # You cannot detect if the child process cannot start.
-        # So the only way you can tell if the child process started
-        # or not is to try to read from the file descriptor. If you get
-        # EOF immediately then it means that the child is already dead.
-        # That may not necessarily be bad because you may have spawned a child
-        # that performs some task; creates no stdout output; and then dies.
-
-        # If command is an int type then it may represent a file descriptor.
-        if type(command) == type(0):
-            raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
-
-        if type (args) != type([]):
-            raise TypeError ('The argument, args, must be a list.')
-
-        if args == []:
-            self.args = split_command_line(command)
-            self.command = self.args[0]
-        else:
-            self.args = args[:] # work with a copy
-            self.args.insert (0, command)
-            self.command = command
-
-        command_with_path = which(self.command)
-        if command_with_path is None:
-            raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
-        self.command = command_with_path
-        self.args[0] = self.command
-
-        self.name = '<' + ' '.join (self.args) + '>'
-
-        assert self.pid is None, 'The pid member should be None.'
-        assert self.command is not None, 'The command member should not be None.'
-
-        if self.use_native_pty_fork:
-            try:
-                self.pid, self.child_fd = pty.fork()
-            except OSError, e:
-                raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
-        else: # Use internal __fork_pty
-            self.pid, self.child_fd = self.__fork_pty()
-
-        if self.pid == 0: # Child
-            try:
-                self.child_fd = sys.stdout.fileno() # used by setwinsize()
-                self.setwinsize(24, 80)
-            except:
-                # Some platforms do not like setwinsize (Cygwin).
-                # This will cause problem when running applications that
-                # are very picky about window size.
-                # This is a serious limitation, but not a show stopper.
-                pass
-            # Do not allow child to inherit open file descriptors from parent.
-            max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
-            for i in range (3, max_fd):
-                try:
-                    os.close (i)
-                except OSError:
-                    pass
-
-            # I don't know why this works, but ignoring SIGHUP fixes a
-            # problem when trying to start a Java daemon with sudo
-            # (specifically, Tomcat).
-            signal.signal(signal.SIGHUP, signal.SIG_IGN)
-
-            if self.cwd is not None:
-                os.chdir(self.cwd)
-            if self.env is None:
-                os.execv(self.command, self.args)
-            else:
-                os.execvpe(self.command, self.args, self.env)
-
-        # Parent
-        self.terminated = False
-        self.closed = False
-
-    def __fork_pty(self):
-
-        """This implements a substitute for the forkpty system call. This
-        should be more portable than the pty.fork() function. Specifically,
-        this should work on Solaris.
-
-        Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
-        resolve the issue with Python's pty.fork() not supporting Solaris,
-        particularly ssh. Based on patch to posixmodule.c authored by Noah
-        Spurrier::
-
-            http://mail.python.org/pipermail/python-dev/2003-May/035281.html
-
-        """
-
-        parent_fd, child_fd = os.openpty()
-        if parent_fd < 0 or child_fd < 0:
-            raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
-
-        pid = os.fork()
-        if pid < 0:
-            raise ExceptionPexpect, "Error! Failed os.fork()."
-        elif pid == 0:
-            # Child.
-            os.close(parent_fd)
-            self.__pty_make_controlling_tty(child_fd)
-
-            os.dup2(child_fd, 0)
-            os.dup2(child_fd, 1)
-            os.dup2(child_fd, 2)
-
-            if child_fd > 2:
-                os.close(child_fd)
-        else:
-            # Parent.
-            os.close(child_fd)
-
-        return pid, parent_fd
-
-    def __pty_make_controlling_tty(self, tty_fd):
-
-        """This makes the pseudo-terminal the controlling tty. This should be
-        more portable than the pty.fork() function. Specifically, this should
-        work on Solaris. """
-
-        child_name = os.ttyname(tty_fd)
-
-        # Disconnect from controlling tty if still connected.
-        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
-        if fd >= 0:
-            os.close(fd)
-
-        os.setsid()
-
-        # Verify we are disconnected from controlling tty
-        try:
-            fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
-            if fd >= 0:
-                os.close(fd)
-                raise ExceptionPexpect, "Error! We are not disconnected from a controlling tty."
-        except OSError:
-            # Good! We are disconnected from a controlling tty.
-            pass
-
-        # Verify we can open child pty.
-        fd = os.open(child_name, os.O_RDWR)
-        if fd < 0:
-            raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
-        else:
-            os.close(fd)
-
-        # Verify we now have a controlling tty.
-        fd = os.open("/dev/tty", os.O_WRONLY)
-        if fd < 0:
-            raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
-        else:
-            os.close(fd)
-
-    def fileno (self):   # File-like object.
-
-        """This returns the file descriptor of the pty for the child.
-        """
-
-        return self.child_fd
-
-    def close (self, force=True):   # File-like object.
-
-        """This closes the connection with the child application. Note that
-        calling close() more than once is valid. This emulates standard Python
-        behavior with files. Set force to True if you want to make sure that
-        the child is terminated (SIGKILL is sent if the child ignores SIGHUP
-        and SIGINT). """
-
-        if not self.closed:
-            self.flush()
-            os.close (self.child_fd)
-            time.sleep(self.delayafterclose) # Give kernel time to update process status.
-            if self.isalive():
-                if not self.terminate(force):
-                    raise ExceptionPexpect ('close() could not terminate the child using terminate()')
-            self.child_fd = -1
-            self.closed = True
-            #self.pid = None
-
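-    # A minimal sketch of fetching the exit status via close(), assuming this
-    # module is importable as pexpect:
-    #
-    #     import pexpect
-    #     child = pexpect.spawn('/bin/ls')
-    #     child.expect(pexpect.EOF)        # drain all output first
-    #     child.close()
-    #     print child.exitstatus           # None if the child died from a signal
-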
-    def flush (self):   # File-like object.
-
-        """This does nothing. It is here to support the interface for a
-        File-like object. """
-
-        pass
-
-    def isatty (self):   # File-like object.
-
-        """This returns True if the file descriptor is open and connected to a
-        tty(-like) device, else False. """
-
-        return os.isatty(self.child_fd)
-
-    def waitnoecho (self, timeout=-1):
-
-        """This waits until the terminal ECHO flag is set False. This returns
-        True if the echo mode is off. This returns False if the ECHO flag was
-        not set False before the timeout. This can be used to detect when the
-        child is waiting for a password. Usually a child application will turn
-        off echo mode when it is waiting for the user to enter a password. For
-        example, instead of expecting the "password:" prompt you can wait for
-        the child to set ECHO off::
-
-            p = pexpect.spawn ('ssh user@example.com')
-            p.waitnoecho()
-            p.sendline(mypassword)
-
-        If timeout is None then this method will block forever until the ECHO
-        flag is False.
-
-        """
-
-        if timeout == -1:
-            timeout = self.timeout
-        if timeout is not None:
-            end_time = time.time() + timeout 
-        while True:
-            if not self.getecho():
-                return True
-            if timeout < 0 and timeout is not None:
-                return False
-            if timeout is not None:
-                timeout = end_time - time.time()
-            time.sleep(0.1)
-
-    def getecho (self):
-
-        """This returns the terminal echo mode. This returns True if echo is
-        on or False if echo is off. Child applications that are expecting you
-        to enter a password often set ECHO False. See waitnoecho(). """
-
-        attr = termios.tcgetattr(self.child_fd)
-        if attr[3] & termios.ECHO:
-            return True
-        return False
-
-    def setecho (self, state):
-
-        """This sets the terminal echo mode on or off. Note that anything the
-        child sent before the echo will be lost, so you should be sure that
-        your input buffer is empty before you call setecho(). For example, the
-        following will work as expected::
-
-            p = pexpect.spawn('cat')
-            p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
-            p.expect (['1234'])
-            p.expect (['1234'])
-            p.setecho(False) # Turn off tty echo
-            p.sendline ('abcd') # We will see this only once (echoed by cat).
-            p.sendline ('wxyz') # We will see this only once (echoed by cat).
-            p.expect (['abcd'])
-            p.expect (['wxyz'])
-
-        The following WILL NOT WORK because the lines sent before the setecho
-        will be lost::
-
-            p = pexpect.spawn('cat')
-            p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
-            p.setecho(False) # Turn off tty echo
-            p.sendline ('abcd') # We will see this only once (echoed by cat).
-            p.sendline ('wxyz') # We will see this only once (echoed by cat).
-            p.expect (['1234'])
-            p.expect (['1234'])
-            p.expect (['abcd'])
-            p.expect (['wxyz'])
-        """
-
-        attr = termios.tcgetattr(self.child_fd)
-        if state:
-            attr[3] = attr[3] | termios.ECHO
-        else:
-            attr[3] = attr[3] & ~termios.ECHO
-        # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
-        # and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
-        termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
-
-    def read_nonblocking (self, size = 1, timeout = -1):
-
-        """This reads at most size characters from the child application. It
-        includes a timeout. If the read does not complete within the timeout
-        period then a TIMEOUT exception is raised. If the end of file is read
-        then an EOF exception will be raised. If a logfile was set (via the
-        logfile attribute) then all data will also be written to the log file.
-
-        If timeout is None then the read may block indefinitely. If timeout is -1
-        then the self.timeout value is used. If timeout is 0 then the child is
-        polled and if there was no data immediately ready then this will raise
-        a TIMEOUT exception.
-
-        The timeout refers only to the amount of time to read at least one
-        character. This is not affected by the 'size' parameter, so if you call
-        read_nonblocking(size=100, timeout=30) and only one character is
-        available right away then one character will be returned immediately.
-        It will not wait for 30 seconds for another 99 characters to come in.
-
-        This is a wrapper around os.read(). It uses select.select() to
-        implement the timeout. """
-
-        if self.closed:
-            raise ValueError ('I/O operation on closed file in read_nonblocking().')
-
-        if timeout == -1:
-            timeout = self.timeout
-
-        # Note that some systems such as Solaris do not give an EOF when
-        # the child dies. In fact, you can still try to read
-        # from the child_fd -- it will block forever or until TIMEOUT.
-        # For this case, I test isalive() before doing any reading.
-        # If isalive() is false, then I pretend that this is the same as EOF.
-        if not self.isalive():
-            r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
-            if not r:
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
-        elif self.__irix_hack:
-            # This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
-            # This adds a 2 second delay, but only when the child is terminated.
-            r, w, e = self.__select([self.child_fd], [], [], 2)
-            if not r and not self.isalive():
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
-
-        r,w,e = self.__select([self.child_fd], [], [], timeout)
-
-        if not r:
-            if not self.isalive():
-                # Some platforms, such as Irix, will claim that their processes are alive;
-                # then timeout on the select; and then finally admit that they are not alive.
-                self.flag_eof = True
-                raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
-            else:
-                raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
-
-        if self.child_fd in r:
-            try:
-                s = os.read(self.child_fd, size)
-            except OSError, e: # Linux does this
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
-            if s == '': # BSD style
-                self.flag_eof = True
-                raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
-
-            if self.logfile is not None:
-                self.logfile.write (s)
-                self.logfile.flush()
-            if self.logfile_read is not None:
-                self.logfile_read.write (s)
-                self.logfile_read.flush()
-
-            return s
-
-        raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
-
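-    # A sketch of polling the child with read_nonblocking(); "child" is assumed
-    # to be an existing spawn instance:
-    #
-    #     try:
-    #         data = child.read_nonblocking(size=1024, timeout=5)
-    #     except pexpect.TIMEOUT:
-    #         data = ''                    # nothing arrived within 5 seconds
-    #     except pexpect.EOF:
-    #         data = ''                    # child closed its end of the pty
-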
-    def read (self, size = -1):   # File-like object.
-
-        """This reads at most "size" bytes from the file (less if the read hits
-        EOF before obtaining size bytes). If the size argument is negative or
-        omitted, read all data until EOF is reached. The bytes are returned as
-        a string object. An empty string is returned when EOF is encountered
-        immediately. """
-
-        if size == 0:
-            return ''
-        if size < 0:
-            self.expect (self.delimiter) # delimiter default is EOF
-            return self.before
-
-        # I could have done this more directly by not using expect(), but
-        # I deliberately decided to couple read() to expect() so that
-        # I would catch any bugs early and ensure consistent behavior.
-        # It's a little less efficient, but there is less for me to
-        # worry about if I have to later modify read() or expect().
-        # Note, it's OK if size==-1 in the regex. That just means it
-        # will never match anything in which case we stop only on EOF.
-        cre = re.compile('.{%d}' % size, re.DOTALL)
-        index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
-        if index == 0:
-            return self.after ### self.before should be ''. Should I assert this?
-        return self.before
-
-    def readline (self, size = -1):    # File-like object.
-
-        """This reads and returns one entire line. A trailing newline is kept
-        in the string, but may be absent when a file ends with an incomplete
-        line. Note: This readline() looks for a \\r\\n pair even on UNIX
-        because this is what the pseudo tty device returns. So contrary to what
-        you may expect you will receive the newline as \\r\\n. An empty string
-        is returned when EOF is hit immediately. Currently, the size argument is
-        mostly ignored, so this behavior is not standard for a file-like
-        object. If size is 0 then an empty string is returned. """
-
-        if size == 0:
-            return ''
-        index = self.expect (['\r\n', self.delimiter]) # delimiter default is EOF
-        if index == 0:
-            return self.before + '\r\n'
-        else:
-            return self.before
-
-    def __iter__ (self):    # File-like object.
-
-        """This is to support iterators over a file-like object.
-        """
-
-        return self
-
-    def next (self):    # File-like object.
-
-        """This is to support iterators over a file-like object.
-        """
-
-        result = self.readline()
-        if result == "":
-            raise StopIteration
-        return result
-
-    def readlines (self, sizehint = -1):    # File-like object.
-
-        """This reads until EOF using readline() and returns a list containing
-        the lines thus read. The optional "sizehint" argument is ignored. """
-
-        lines = []
-        while True:
-            line = self.readline()
-            if not line:
-                break
-            lines.append(line)
-        return lines
-
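-    # A sketch of the file-like read interface; note the pty yields '\r\n'
-    # line endings even on UNIX:
-    #
-    #     child = pexpect.spawn('ls /etc')
-    #     for line in child.readlines():
-    #         print line.rstrip()
-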
-    def write(self, s):   # File-like object.
-
-        """This is similar to send() except that there is no return value.
-        """
-
-        self.send (s)
-
-    def writelines (self, sequence):   # File-like object.
-
-        """This calls write() for each element in the sequence. The sequence
-        can be any iterable object producing strings, typically a list of
-        strings. This does not add line separators. There is no return value.
-        """
-
-        for s in sequence:
-            self.write (s)
-
-    def send(self, s):
-
-        """This sends a string to the child process. This returns the number of
-        bytes written. If a log file was set then the data is also written to
-        the log. """
-
-        time.sleep(self.delaybeforesend)
-        if self.logfile is not None:
-            self.logfile.write (s)
-            self.logfile.flush()
-        if self.logfile_send is not None:
-            self.logfile_send.write (s)
-            self.logfile_send.flush()
-        c = os.write(self.child_fd, s)
-        return c
-
-    def sendline(self, s=''):
-
-        """This is like send(), but it adds a line feed (os.linesep). This
-        returns the number of bytes written. """
-
-        n = self.send(s)
-        n = n + self.send (os.linesep)
-        return n
-
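-    # A sketch of driving a prompt with expect() and sendline(); the prompt
-    # and reply shown are illustrative only:
-    #
-    #     child.expect('login:')
-    #     child.sendline('guest')
-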
-    def sendcontrol(self, char):
-
-        """This sends a control character to the child such as Ctrl-C or
-        Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
-
-            child.sendcontrol('g')
-
-        See also, sendintr() and sendeof().
-        """
-
-        char = char.lower()
-        a = ord(char)
-        if a>=97 and a<=122:
-            a = a - ord('a') + 1
-            return self.send (chr(a))
-        d = {'@':0, '`':0,
-            '[':27, '{':27,
-            '\\':28, '|':28,
-            ']':29, '}': 29,
-            '^':30, '~':30,
-            '_':31,
-            '?':127}
-        if char not in d:
-            return 0
-        return self.send (chr(d[char]))
-
-    def sendeof(self):
-
-        """This sends an EOF to the child. This sends a character which causes
-        the pending parent output buffer to be sent to the waiting child
-        program without waiting for end-of-line. If it is the first character
-        of the line, the read() in the user program returns 0, which signifies
-        end-of-file. This means to work as expected a sendeof() has to be
-        called at the beginning of a line. This method does not send a newline.
-        It is the responsibility of the caller to ensure the eof is sent at the
-        beginning of a line. """
-
-        ### Hmmm... how do I send an EOF?
-        ###C  if ((m = write(pty, *buf, p - *buf)) < 0)
-        ###C      return (errno == EWOULDBLOCK) ? n : -1;
-        #fd = sys.stdin.fileno()
-        #old = termios.tcgetattr(fd) # remember current state
-        #attr = termios.tcgetattr(fd)
-        #attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
-        #try: # use try/finally to ensure state gets restored
-        #    termios.tcsetattr(fd, termios.TCSADRAIN, attr)
-        #    if hasattr(termios, 'CEOF'):
-        #        os.write (self.child_fd, '%c' % termios.CEOF)
-        #    else:
-        #        # Silly platform does not define CEOF so assume CTRL-D
-        #        os.write (self.child_fd, '%c' % 4)
-        #finally: # restore state
-        #    termios.tcsetattr(fd, termios.TCSADRAIN, old)
-        if hasattr(termios, 'VEOF'):
-            char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
-        else:
-            # platform does not define VEOF so assume CTRL-D
-            char = chr(4)
-        self.send(char)
-
-    def sendintr(self):
-
-        """This sends a SIGINT to the child. It does not require
-        the SIGINT to be the first character on a line. """
-
-        if hasattr(termios, 'VINTR'):
-            char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
-        else:
-            # platform does not define VINTR so assume CTRL-C
-            char = chr(3)
-        self.send (char)
-
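-    # A sketch of interrupting a long-running child with sendintr(), assuming
-    # sleep is on the PATH:
-    #
-    #     child = pexpect.spawn('sleep 60')
-    #     child.sendintr()                 # delivers the tty's VINTR character
-    #     child.expect(pexpect.EOF)
-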
-    def eof (self):
-
-        """This returns True if the EOF exception was ever raised.
-        """
-
-        return self.flag_eof
-
-    def terminate(self, force=False):
-
-        """This forces a child process to terminate. It starts nicely with
-        SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
-        returns True if the child was terminated. This returns False if the
-        child could not be terminated. """
-
-        if not self.isalive():
-            return True
-        try:
-            self.kill(signal.SIGHUP)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            self.kill(signal.SIGCONT)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            self.kill(signal.SIGINT)
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            if force:
-                self.kill(signal.SIGKILL)
-                time.sleep(self.delayafterterminate)
-                if not self.isalive():
-                    return True
-                else:
-                    return False
-            return False
-        except OSError, e:
-            # I think there are kernel timing issues that sometimes cause
-            # this to happen. I think isalive() reports True, but the
-            # process is dead to the kernel.
-            # Make one last attempt to see if the kernel is up to date.
-            time.sleep(self.delayafterterminate)
-            if not self.isalive():
-                return True
-            else:
-                return False
-
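-    # A sketch of an escalating shutdown with terminate(); the second call
-    # falls back to SIGKILL:
-    #
-    #     if not child.terminate():        # tries SIGHUP, SIGCONT, SIGINT
-    #         child.terminate(force=True)  # last resort: SIGKILL
-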
-    def wait(self):
-
-        """This waits until the child exits. This is a blocking call. This will
-        not read any data from the child, so this will block forever if the
-        child has unread output and has terminated. In other words, the child
-        may have printed output then called exit(); but, technically, the child
-        is still alive until its output is read. """
-
-        if self.isalive():
-            pid, status = os.waitpid(self.pid, 0)
-        else:
-            raise ExceptionPexpect ('Cannot wait for dead child process.')
-        if os.WIFEXITED (status):
-            self.status = status
-            self.exitstatus = os.WEXITSTATUS(status)
-            self.signalstatus = None
-            self.terminated = True
-        elif os.WIFSIGNALED (status):
-            self.status = status
-            self.exitstatus = None
-            self.signalstatus = os.WTERMSIG(status)
-            self.terminated = True
-        elif os.WIFSTOPPED (status):
-            raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
-        return self.exitstatus
-
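-    # A minimal sketch of wait(); a silent child avoids the unread-output
-    # deadlock described above:
-    #
-    #     child = pexpect.spawn('sleep 1')
-    #     status = child.wait()            # blocks until the child exits
-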
-    def isalive(self):
-
-        """This tests if the child process is running or not. This is
-        non-blocking. If the child was terminated then this will read the
-        exitstatus or signalstatus of the child. This returns True if the child
-        process appears to be running or False if not. It can take literally
-        SECONDS for Solaris to return the right status. """
-
-        if self.terminated:
-            return False
-
-        if self.flag_eof:
-            # This is for Linux, which requires the blocking form of waitpid to get
-            # status of a defunct process. This is super-lame. The flag_eof would have
-            # been set in read_nonblocking(), so this should be safe.
-            waitpid_options = 0
-        else:
-            waitpid_options = os.WNOHANG
-
-        try:
-            pid, status = os.waitpid(self.pid, waitpid_options)
-        except OSError, e: # No child processes
-            if e[0] == errno.ECHILD:
-                raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
-            else:
-                raise e
-
-        # I have to do this twice for Solaris. I can't even believe that I figured this out...
-        # If waitpid() returns 0 it means that no child process wishes to
-        # report, and the value of status is undefined.
-        if pid == 0:
-            try:
-                pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
-            except OSError, e: # This should never happen...
-                if e[0] == errno.ECHILD:
-                    raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
-                else:
-                    raise e
-
-            # If pid is still 0 after two calls to waitpid() then
-            # the process really is alive. This seems to work on all platforms, except
-            # for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
-            # take care of this situation (unfortunately, this requires waiting through the timeout).
-            if pid == 0:
-                return True
-
-        if os.WIFEXITED (status):
-            self.status = status
-            self.exitstatus = os.WEXITSTATUS(status)
-            self.signalstatus = None
-            self.terminated = True
-        elif os.WIFSIGNALED (status):
-            self.status = status
-            self.exitstatus = None
-            self.signalstatus = os.WTERMSIG(status)
-            self.terminated = True
-        elif os.WIFSTOPPED (status):
-            raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
-        return False
-
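-    # A sketch of polling a child with isalive(); illustrative only:
-    #
-    #     import time
-    #     child = pexpect.spawn('sleep 2')
-    #     while child.isalive():
-    #         time.sleep(0.1)              # do other work between polls
-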
-    def kill(self, sig):
-
-        """This sends the given signal to the child application. In keeping
-        with UNIX tradition it has a misleading name. It does not necessarily
-        kill the child unless you send the right signal. """
-
-        # Same as os.kill, but the pid is given for you.
-        if self.isalive():
-            os.kill(self.pid, sig)
-
-    def compile_pattern_list(self, patterns):
-
-        """This compiles a pattern-string or a list of pattern-strings.
-        Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
-        those. Patterns may also be None which results in an empty list (you
-        might do this if waiting for an EOF or TIMEOUT condition without
-        expecting any pattern).
-
-        This is used by expect() when calling expect_list(). Thus expect() is
-        nothing more than::
-
-             cpl = self.compile_pattern_list(pl)
-             return self.expect_list(cpl, timeout)
-
-        If you are using expect() within a loop it may be more
-        efficient to compile the patterns first and then call expect_list().
-        This avoids calls in a loop to compile_pattern_list()::
-
-             cpl = self.compile_pattern_list(my_pattern)
-             while some_condition:
-                ...
-                i = self.expect_list(cpl, timeout)
-                ...
-        """
-
-        if patterns is None:
-            return []
-        if type(patterns) is not types.ListType:
-            patterns = [patterns]
-
-        compile_flags = re.DOTALL # Allow dot to match \n
-        if self.ignorecase:
-            compile_flags = compile_flags | re.IGNORECASE
-        compiled_pattern_list = []
-        for p in patterns:
-            if type(p) in types.StringTypes:
-                compiled_pattern_list.append(re.compile(p, compile_flags))
-            elif p is EOF:
-                compiled_pattern_list.append(EOF)
-            elif p is TIMEOUT:
-                compiled_pattern_list.append(TIMEOUT)
-            elif type(p) is type(re.compile('')):
-                compiled_pattern_list.append(p)
-            else:
-                raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those types. %s' % str(type(p)))
-
-        return compiled_pattern_list
-
-    def expect(self, pattern, timeout = -1, searchwindowsize=None):
-
-        """This seeks through the stream until a pattern is matched. The
-        pattern is overloaded and may take several types. The pattern can be a
-        StringType, EOF, a compiled re, or a list of any of those types.
-        Strings will be compiled to re types. This returns the index into the
-        pattern list. If the pattern was not a list this returns index 0 on a
-        successful match. This may raise exceptions for EOF or TIMEOUT. To
-        avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
-        list. That will cause expect to match an EOF or TIMEOUT condition
-        instead of raising an exception.
-
-        If you pass a list of patterns and more than one matches, the first match
-        in the stream is chosen. If more than one pattern matches at that point,
-        the leftmost in the pattern list is chosen. For example::
-
-            # the input is 'foobar'
-            index = p.expect (['bar', 'foo', 'foobar'])
-            # returns 1 ('foo') even though 'foobar' is a "better" match
-
-        Please note, however, that buffering can affect this behavior, since
-        input arrives in unpredictable chunks. For example::
-
-            # the input is 'foobar'
-            index = p.expect (['foobar', 'foo'])
-            # returns 0 ('foobar') if all input is available at once,
-            # but returns 1 ('foo') if parts of the final 'bar' arrive late
-
-        After a match is found the instance attributes 'before', 'after' and
-        'match' will be set. You can see all the data read before the match in
-        'before'. You can see the data that was matched in 'after'. The
-        re.MatchObject used in the re match will be in 'match'. If an error
-        occurred then 'before' will be set to all the data read so far and
-        'after' and 'match' will be None.
-
-        If timeout is -1 then timeout will be set to the self.timeout value.
-
-        A list entry may be EOF or TIMEOUT instead of a string. This will
-        catch these exceptions and return the index of the list entry instead
-        of raising the exception. The attribute 'after' will be set to the
-        exception type. The attribute 'match' will be None. This allows you to
-        write code like this::
-
-                index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
-                if index == 0:
-                    do_something()
-                elif index == 1:
-                    do_something_else()
-                elif index == 2:
-                    do_some_other_thing()
-                elif index == 3:
-                    do_something_completely_different()
-
-        instead of code like this::
-
-                try:
-                    index = p.expect (['good', 'bad'])
-                    if index == 0:
-                        do_something()
-                    elif index == 1:
-                        do_something_else()
-                except EOF:
-                    do_some_other_thing()
-                except TIMEOUT:
-                    do_something_completely_different()
-
-        These two forms are equivalent. It all depends on what you want. You
-        can also just expect the EOF if you are waiting for all output of a
-        child to finish. For example::
-
-                p = pexpect.spawn('/bin/ls')
-                p.expect (pexpect.EOF)
-                print p.before
-
-        If you are trying to optimize for speed then see expect_list().
-        """
-
-        compiled_pattern_list = self.compile_pattern_list(pattern)
-        return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
-
-    def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
-        """This takes a list of compiled regular expressions and returns the
-        index into the pattern_list that matched the child output. The list may
-        also contain EOF or TIMEOUT (which are not compiled regular
-        expressions). This method is similar to the expect() method except that
-        expect_list() does not recompile the pattern list on every call. This
-        may help if you are trying to optimize for speed, otherwise just use
-        the expect() method.  This is called by expect(). If timeout==-1 then
-        the self.timeout value is used. If searchwindowsize==-1 then the
-        self.searchwindowsize value is used. """
-
-        return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
-
-    def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
-        """This is similar to expect(), but uses plain string matching instead
-        of compiled regular expressions in 'pattern_list'. The 'pattern_list'
-        may be a string; a list or other sequence of strings; or TIMEOUT and
-        EOF.
-
-        This call might be faster than expect() for two reasons: string
-        searching is faster than RE matching and it is possible to limit the
-        search to just the end of the input buffer.
-
-        This method is also useful when you don't want to have to worry about
-        escaping regular expression characters that you want to match."""
-
-        if type(pattern_list) in types.StringTypes or pattern_list in (TIMEOUT, EOF):
-            pattern_list = [pattern_list]
-        return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
-
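-    # A sketch of expect_exact() matching a literal string that would
-    # otherwise need regular-expression escaping (prompt is illustrative):
-    #
-    #     child.expect_exact('Continue? [y/n]')
-    #     child.sendline('y')
-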
-    def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
-
-        """This is the common loop used inside expect. The 'searcher' should be
-        an instance of searcher_re or searcher_string, which describes how and what
-        to search for in the input.
-
-        See expect() for other arguments, return value and exceptions. """
-
-        self.searcher = searcher
-
-        if timeout == -1:
-            timeout = self.timeout
-        if timeout is not None:
-            end_time = time.time() + timeout 
-        if searchwindowsize == -1:
-            searchwindowsize = self.searchwindowsize
-
-        try:
-            incoming = self.buffer
-            freshlen = len(incoming)
-            while True: # Keep reading until exception or return.
-                index = searcher.search(incoming, freshlen, searchwindowsize)
-                if index >= 0:
-                    self.buffer = incoming[searcher.end : ]
-                    self.before = incoming[ : searcher.start]
-                    self.after = incoming[searcher.start : searcher.end]
-                    self.match = searcher.match
-                    self.match_index = index
-                    return self.match_index
-                # No match at this point
-                if timeout < 0 and timeout is not None:
-                    raise TIMEOUT ('Timeout exceeded in expect_loop().')
-                # Still have time left, so read more data
-                c = self.read_nonblocking (self.maxread, timeout)
-                freshlen = len(c)
-                time.sleep (0.0001)
-                incoming = incoming + c
-                if timeout is not None:
-                    timeout = end_time - time.time()
-        except EOF, e:
-            self.buffer = ''
-            self.before = incoming
-            self.after = EOF
-            index = searcher.eof_index
-            if index >= 0:
-                self.match = EOF
-                self.match_index = index
-                return self.match_index
-            else:
-                self.match = None
-                self.match_index = None
-                raise EOF (str(e) + '\n' + str(self))
-        except TIMEOUT, e:
-            self.buffer = incoming
-            self.before = incoming
-            self.after = TIMEOUT
-            index = searcher.timeout_index
-            if index >= 0:
-                self.match = TIMEOUT
-                self.match_index = index
-                return self.match_index
-            else:
-                self.match = None
-                self.match_index = None
-                raise TIMEOUT (str(e) + '\n' + str(self))
-        except:
-            self.before = incoming
-            self.after = None
-            self.match = None
-            self.match_index = None
-            raise
-
-    def getwinsize(self):
-
-        """This returns the terminal window size of the child tty. The return
-        value is a tuple of (rows, cols). """
-
-        TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
-        s = struct.pack('HHHH', 0, 0, 0, 0)
-        x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
-        return struct.unpack('HHHH', x)[0:2]
-
-    def setwinsize(self, r, c):
-
-        """This sets the terminal window size of the child tty. This will cause
-        a SIGWINCH signal to be sent to the child. This does not change the
-        physical window size. It changes the size reported to TTY-aware
-        applications like vi or curses -- applications that respond to the
-        SIGWINCH signal. """
-
-        # Check for buggy platforms. Some Python versions on some platforms
-        # (notably OSF1 Alpha and RedHat 7.1) truncate the value for
-        # termios.TIOCSWINSZ. It is not clear why this happens.
-        # These platforms don't seem to handle the signed int very well;
-        # yet other platforms like OpenBSD have a large negative value for
-        # TIOCSWINSZ and they don't have a truncate problem.
-        # Newer versions of Linux have totally different values for TIOCSWINSZ.
-        # Note that this fix is a hack.
-        TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
-        if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
-            TIOCSWINSZ = -2146929561 # Same bits, but with sign.
-        # Note, assume ws_xpixel and ws_ypixel are zero.
-        s = struct.pack('HHHH', r, c, 0, 0)
-        fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
-
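-    # A sketch of reading and resizing the child's tty window; the new size
-    # is illustrative:
-    #
-    #     rows, cols = child.getwinsize()
-    #     child.setwinsize(40, 132)        # child receives SIGWINCH
-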
-    def interact(self, escape_character = chr(29), input_filter = None, output_filter = None):
-
-        """This gives control of the child process to the interactive user (the
-        human at the keyboard). Keystrokes are sent to the child process, and
-        the stdout and stderr output of the child process is printed. This
-        simply echoes the child stdout and child stderr to the real stdout and
-        echoes the real stdin to the child stdin. When the user types the
-        escape_character this method will stop. The default for
-        escape_character is ^]. This should not be confused with ASCII 27 --
-        the ESC character. ASCII 29 was chosen for historical reasons because
-        this is the character used by 'telnet' as the escape character. The
-        escape_character will not be sent to the child process.
-
-        You may pass in optional input and output filter functions. These
-        functions should take a string and return a string. The output_filter
-        will be passed all the output from the child process. The input_filter
-        will be passed all the keyboard input from the user. The input_filter
-        is run BEFORE the check for the escape_character.
-
-        Note that if you change the window size of the parent the SIGWINCH
-        signal will not be passed through to the child. If you want the child
-        window size to change when the parent's window size changes then do
-        something like the following example::
-
-            import pexpect, struct, fcntl, termios, signal, sys
-            def sigwinch_passthrough (sig, data):
-                s = struct.pack("HHHH", 0, 0, 0, 0)
-                a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
-                global p
-                p.setwinsize(a[0],a[1])
-            p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
-            signal.signal(signal.SIGWINCH, sigwinch_passthrough)
-            p.interact()
-        """
-
-        # Flush the buffer.
-        self.stdout.write (self.buffer)
-        self.stdout.flush()
-        self.buffer = ''
-        mode = tty.tcgetattr(self.STDIN_FILENO)
-        tty.setraw(self.STDIN_FILENO)
-        try:
-            self.__interact_copy(escape_character, input_filter, output_filter)
-        finally:
-            tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
-
-    def __interact_writen(self, fd, data):
-
-        """This is used by the interact() method.
-        """
-
-        while data != '' and self.isalive():
-            n = os.write(fd, data)
-            data = data[n:]
-
-    def __interact_read(self, fd):
-
-        """This is used by the interact() method.
-        """
-
-        return os.read(fd, 1000)
-
-    def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
-
-        """This is used by the interact() method.
-        """
-
-        while self.isalive():
-            r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
-            if self.child_fd in r:
-                data = self.__interact_read(self.child_fd)
-                if output_filter: data = output_filter(data)
-                if self.logfile is not None:
-                    self.logfile.write (data)
-                    self.logfile.flush()
-                os.write(self.STDOUT_FILENO, data)
-            if self.STDIN_FILENO in r:
-                data = self.__interact_read(self.STDIN_FILENO)
-                if input_filter: data = input_filter(data)
-                i = data.rfind(escape_character)
-                if i != -1:
-                    data = data[:i]
-                    self.__interact_writen(self.child_fd, data)
-                    break
-                self.__interact_writen(self.child_fd, data)
-
-    def __select (self, iwtd, owtd, ewtd, timeout=None):
-
-        """This is a wrapper around select.select() that ignores signals. If
-        select.select raises a select.error exception and errno is an EINTR
-        error then it is ignored. Mainly this is used to ignore sigwinch
-        (terminal resize). """
-
-        # if select() is interrupted by a signal (errno==EINTR) then
-        # we loop back and enter the select() again.
-        if timeout is not None:
-            end_time = time.time() + timeout
-        while True:
-            try:
-                return select.select (iwtd, owtd, ewtd, timeout)
-            except select.error, e:
-                if e[0] == errno.EINTR:
-                    # if we loop back we have to subtract the amount of time we already waited.
-                    if timeout is not None:
-                        timeout = end_time - time.time()
-                        if timeout < 0:
-                            return ([],[],[])
-                else: # something else caused the select.error, so this really is an exception
-                    raise
-
-##############################################################################
-# The following methods are no longer supported or allowed.
-
-    def setmaxread (self, maxread):
-
-        """This method is no longer supported or allowed. I don't like getters
-        and setters without a good reason. """
-
-        raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the maxread member variable.')
-
-    def setlog (self, fileobject):
-
-        """This method is no longer supported or allowed.
-        """
-
-        raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the logfile member variable.')
-
-##############################################################################
-# End of spawn class
-##############################################################################
-
-class searcher_string (object):
-
-    """This is a plain string search helper for the spawn.expect_any() method.
-
-    Attributes:
-
-        eof_index     - index of EOF, or -1
-        timeout_index - index of TIMEOUT, or -1
-
-    After a successful match by the search() method the following attributes
-    are available:
-
-        start - index into the buffer, first byte of match
-        end   - index into the buffer, first byte after match
-        match - the matching string itself
-    """
-
-    def __init__(self, strings):
-
-        """This creates an instance of searcher_string. This argument 'strings'
-        may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
-
-        self.eof_index = -1
-        self.timeout_index = -1
-        self._strings = []
-        for n, s in zip(range(len(strings)), strings):
-            if s is EOF:
-                self.eof_index = n
-                continue
-            if s is TIMEOUT:
-                self.timeout_index = n
-                continue
-            self._strings.append((n, s))
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object."""
-
-        ss =  [ (ns[0],'    %d: "%s"' % ns) for ns in self._strings ]
-        ss.append((-1,'searcher_string:'))
-        if self.eof_index >= 0:
-            ss.append ((self.eof_index,'    %d: EOF' % self.eof_index))
-        if self.timeout_index >= 0:
-            ss.append ((self.timeout_index,'    %d: TIMEOUT' % self.timeout_index))
-        ss.sort()
-        ss = zip(*ss)[1]
-        return '\n'.join(ss)
-
-    def search(self, buffer, freshlen, searchwindowsize=None):
-
-        """This searches 'buffer' for the first occurence of one of the search
-        strings.  'freshlen' must indicate the number of bytes at the end of
-        'buffer' which have not been searched before. It helps to avoid
-        searching the same, possibly big, buffer over and over again.
-
-        See class spawn for the 'searchwindowsize' argument.
-
-        If there is a match this returns the index of that string, and sets
-        'start', 'end' and 'match'. Otherwise, this returns -1. """
-
-        absurd_match = len(buffer)
-        first_match = absurd_match
-
-        # 'freshlen' helps a lot here. Further optimizations could
-        # possibly include:
-        #
-        # using something like the Boyer-Moore Fast String Searching
-        # Algorithm; pre-compiling the search through a list of
-        # strings into something that can scan the input once to
-        # search for all N strings; realize that if we search for
-        # ['bar', 'baz'] and the input is '...foo' we need not bother
-        # rescanning until we've read three more bytes.
-        #
-        # Sadly, I don't know enough about this interesting topic. /grahn
-        
-        for index, s in self._strings:
-            if searchwindowsize is None:
-                # the match, if any, can only be in the fresh data,
-                # or at the very end of the old data
-                offset = -(freshlen+len(s))
-            else:
-                # better obey searchwindowsize
-                offset = -searchwindowsize
-            n = buffer.find(s, offset)
-            if n >= 0 and n < first_match:
-                first_match = n
-                best_index, best_match = index, s
-        if first_match == absurd_match:
-            return -1
-        self.match = best_match
-        self.start = first_match
-        self.end = self.start + len(self.match)
-        return best_index
-
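As a quick sanity check, searcher_string can be exercised on its own; the buffer and strings below are illustrative:

    s = searcher_string(['foo', 'bar'])
    buf = 'xxxbaryyy'
    # freshlen = whole buffer, since nothing has been searched before
    idx = s.search(buf, len(buf))
    print idx, s.start, s.end, s.match    # -> 1 3 6 bar
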
-class searcher_re (object):
-
-    """This is regular expression string search helper for the
-    spawn.expect_any() method.
-
-    Attributes:
-
-        eof_index     - index of EOF, or -1
-        timeout_index - index of TIMEOUT, or -1
-
-    After a successful match by the search() method the following attributes
-    are available:
-
-        start - index into the buffer, first byte of match
-        end   - index into the buffer, first byte after match
-        match - the re.match object returned by a successful re.search
-
-    """
-
-    def __init__(self, patterns):
-
-        """This creates an instance that searches for 'patterns' Where
-        'patterns' may be a list or other sequence of compiled regular
-        expressions, or the EOF or TIMEOUT types."""
-
-        self.eof_index = -1
-        self.timeout_index = -1
-        self._searches = []
-        for n, s in zip(range(len(patterns)), patterns):
-            if s is EOF:
-                self.eof_index = n
-                continue
-            if s is TIMEOUT:
-                self.timeout_index = n
-                continue
-            self._searches.append((n, s))
-
-    def __str__(self):
-
-        """This returns a human-readable string that represents the state of
-        the object."""
-
-        ss =  [ (n,'    %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
-        ss.append((-1,'searcher_re:'))
-        if self.eof_index >= 0:
-            ss.append ((self.eof_index,'    %d: EOF' % self.eof_index))
-        if self.timeout_index >= 0:
-            ss.append ((self.timeout_index,'    %d: TIMEOUT' % self.timeout_index))
-        ss.sort()
-        ss = zip(*ss)[1]
-        return '\n'.join(ss)
-
-    def search(self, buffer, freshlen, searchwindowsize=None):
-
-        """This searches 'buffer' for the first occurence of one of the regular
-        expressions. 'freshlen' must indicate the number of bytes at the end of
-        'buffer' which have not been searched before.
-
-        See class spawn for the 'searchwindowsize' argument.
-        
-        If there is a match this returns the index of that string, and sets
-        'start', 'end' and 'match'. Otherwise, returns -1."""
-
-        absurd_match = len(buffer)
-        first_match = absurd_match
-        # 'freshlen' doesn't help here -- we cannot predict the
-        # length of a match, and the re module provides no help.
-        if searchwindowsize is None:
-            searchstart = 0
-        else:
-            searchstart = max(0, len(buffer)-searchwindowsize)
-        for index, s in self._searches:
-            match = s.search(buffer, searchstart)
-            if match is None:
-                continue
-            n = match.start()
-            if n < first_match:
-                first_match = n
-                the_match = match
-                best_index = index
-        if first_match == absurd_match:
-            return -1
-        self.start = first_match
-        self.match = the_match
-        self.end = self.match.end()
-        return best_index
-
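searcher_re behaves the same way, except that 'match' is an re.match object; a minimal sketch with illustrative patterns:

    import re
    r = searcher_re([re.compile('ba+r'), re.compile('qux')])
    buf = 'xxbaaryy'
    idx = r.search(buf, len(buf))
    print idx, r.start, r.end, r.match.group(0)    # -> 0 2 6 baar
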
-def which (filename):
-
-    """This takes a given filename; tries to find it in the environment path;
-    then checks if it is executable. This returns the full path to the filename
-    if found and executable. Otherwise this returns None."""
-
-    # Special case where filename already contains a path.
-    if os.path.dirname(filename) != '':
-        if os.access (filename, os.X_OK):
-            return filename
-
-    if not os.environ.has_key('PATH') or os.environ['PATH'] == '':
-        p = os.defpath
-    else:
-        p = os.environ['PATH']
-
-    # Oddly enough this was the one line that made Pexpect
-    # incompatible with Python 1.5.2.
-    #pathlist = p.split (os.pathsep)
-    pathlist = string.split (p, os.pathsep)
-
-    for path in pathlist:
-        f = os.path.join(path, filename)
-        if os.access(f, os.X_OK):
-            return f
-    return None
-
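which() mirrors the shell's PATH lookup; for example (output is machine-dependent):

    print which('ls')                  # e.g. /bin/ls
    print which('no-such-program')     # None
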
-def split_command_line(command_line):
-
-    """This splits a command line into a list of arguments. It splits arguments
-    on spaces, but handles embedded quotes, doublequotes, and escaped
-    characters. It's impossible to do this with a regular expression, so I
-    wrote a little state machine to parse the command line. """
-
-    arg_list = []
-    arg = ''
-
-    # Constants to name the states we can be in.
-    state_basic = 0
-    state_esc = 1
-    state_singlequote = 2
-    state_doublequote = 3
-    state_whitespace = 4 # The state of consuming whitespace between commands.
-    state = state_basic
-
-    for c in command_line:
-        if state == state_basic or state == state_whitespace:
-            if c == '\\': # Escape the next character
-                state = state_esc
-            elif c == r"'": # Handle single quote
-                state = state_singlequote
-            elif c == r'"': # Handle double quote
-                state = state_doublequote
-            elif c.isspace():
-                # Add arg to arg_list if we aren't in the middle of whitespace.
-                if state == state_whitespace:
-                    pass # Do nothing.
-                else:
-                    arg_list.append(arg)
-                    arg = ''
-                    state = state_whitespace
-            else:
-                arg = arg + c
-                state = state_basic
-        elif state == state_esc:
-            arg = arg + c
-            state = state_basic
-        elif state == state_singlequote:
-            if c == r"'":
-                state = state_basic
-            else:
-                arg = arg + c
-        elif state == state_doublequote:
-            if c == r'"':
-                state = state_basic
-            else:
-                arg = arg + c
-
-    if arg != '':
-        arg_list.append(arg)
-    return arg_list
-
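The state machine is easiest to verify from a few concrete inputs:

    print split_command_line('ls -l /tmp')         # ['ls', '-l', '/tmp']
    print split_command_line('echo "a b" c')       # ['echo', 'a b', 'c']
    print split_command_line(r'grep it\ works')    # ['grep', 'it works']
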
-# vi:ts=4:sw=4:expandtab:ft=python:

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/script/pexpect.pyc
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/script/pexpect.pyc b/helix-agent/src/main/scripts/integration-test/script/pexpect.pyc
deleted file mode 100644
index 1e3478f..0000000
Binary files a/helix-agent/src/main/scripts/integration-test/script/pexpect.pyc and /dev/null differ


[3/3] git commit: remove test scripts for helix-agent. disable helix-agent

Posted by zz...@apache.org.
remove test scripts for helix-agent. disable helix-agent


Project: http://git-wip-us.apache.org/repos/asf/incubator-helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-helix/commit/4ebc0fad
Tree: http://git-wip-us.apache.org/repos/asf/incubator-helix/tree/4ebc0fad
Diff: http://git-wip-us.apache.org/repos/asf/incubator-helix/diff/4ebc0fad

Branch: refs/heads/master
Commit: 4ebc0fad9b9f3135c64c266d7cdae9b65177ce61
Parents: d5289ae
Author: zzhang <zz...@uci.edu>
Authored: Wed May 1 18:26:36 2013 -0700
Committer: zzhang <zz...@uci.edu>
Committed: Wed May 1 18:26:36 2013 -0700

----------------------------------------------------------------------
 .../integration-test/config/log4j-info.properties  |   23 -
 .../integration-test/config/log4j.properties       |   25 -
 .../config/zookeeper-log4j2file.properties         |   26 -
 .../integration-test/lib/dds_test_infra.tar.gz     |  Bin 69424 -> 0 bytes
 .../main/scripts/integration-test/log4j.properties |   24 -
 .../scripts/integration-test/script/cm_driver.py   |   71 -
 .../scripts/integration-test/script/dds_driver.py  | 1098 ---------
 .../integration-test/script/driver_cmd_dict.py     |  312 ---
 .../scripts/integration-test/script/pexpect.py     | 1864 ---------------
 .../scripts/integration-test/script/pexpect.pyc    |  Bin 68256 -> 0 bytes
 .../scripts/integration-test/script/utility.py     |  813 -------
 .../scripts/integration-test/script/utility.pyc    |  Bin 41258 -> 0 bytes
 .../main/scripts/integration-test/setup_env.inc    |   37 -
 .../scripts/integration-test/testcases/foo_test.py |   38 -
 .../testcases/report_pass_fail.inc                 |   40 -
 .../integration-test/testcases/setup_env.inc       |   60 -
 .../org/apache/helix/agent/TestHelixAgent.java     |    3 +-
 17 files changed, 2 insertions(+), 4432 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/config/log4j-info.properties
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/config/log4j-info.properties b/helix-agent/src/main/scripts/integration-test/config/log4j-info.properties
deleted file mode 100644
index cca0ae9..0000000
--- a/helix-agent/src/main/scripts/integration-test/config/log4j-info.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-log4j.rootLogger=INFO, ConsoleAppender
-log4j.appender.ConsoleAppender=org.apache.log4j.ConsoleAppender
-log4j.appender.ConsoleAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.ConsoleAppender.layout.ConversionPattern=%d{ISO8601} +%r [%t] (%p) {%c{1}:%M} (%F:%L) %m%n

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/config/log4j.properties
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/config/log4j.properties b/helix-agent/src/main/scripts/integration-test/config/log4j.properties
deleted file mode 100644
index 7e49982..0000000
--- a/helix-agent/src/main/scripts/integration-test/config/log4j.properties
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-log4j.rootLogger=CONSOLE, R
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.R=com.linkedin.clustermanager.tools.CLMLogFileAppender
-log4j.appender.R.layout=org.apache.log4j.PatternLayout
-log4j.appender.R.layout.ConversionPattern=%5p [%C:%M] (%F:%L) - %m%n
-log4j.appender.R.File=${user.home}/EspressoLogs/log.txt

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/config/zookeeper-log4j2file.properties
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/config/zookeeper-log4j2file.properties b/helix-agent/src/main/scripts/integration-test/config/zookeeper-log4j2file.properties
deleted file mode 100644
index 2ce940f..0000000
--- a/helix-agent/src/main/scripts/integration-test/config/zookeeper-log4j2file.properties
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-log4j.rootLogger=INFO, ConsoleAppender
-
-log4j.appender.ConsoleAppender=org.apache.log4j.ConsoleAppender
-
-log4j.appender.ConsoleAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.ConsoleAppender.layout.ConversionPattern=%d{yyyy/MM/dd HH:mm:ss.SSS} %t %p [%c] %m%n
-
-#log4j.logger.org.apache=WARN
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/lib/dds_test_infra.tar.gz
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/lib/dds_test_infra.tar.gz b/helix-agent/src/main/scripts/integration-test/lib/dds_test_infra.tar.gz
deleted file mode 100644
index cc621e0..0000000
Binary files a/helix-agent/src/main/scripts/integration-test/lib/dds_test_infra.tar.gz and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/log4j.properties
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/log4j.properties b/helix-agent/src/main/scripts/integration-test/log4j.properties
deleted file mode 100644
index 562afe0..0000000
--- a/helix-agent/src/main/scripts/integration-test/log4j.properties
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-log4j.rootLogger=CONSOLE, R
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.R=com.linkedin.espresso.cm.tools.CLMLogFileAppender
-log4j.appender.R.layout=org.apache.log4j.PatternLayout
-log4j.appender.R.layout.ConversionPattern=%5p [%C:%M] (%F:%L) - %m%n
-log4j.appender.R.File=${user.home}/EspressoLogs/log.txt
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/script/cm_driver.py
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/script/cm_driver.py b/helix-agent/src/main/scripts/integration-test/script/cm_driver.py
deleted file mode 100755
index 5884976..0000000
--- a/helix-agent/src/main/scripts/integration-test/script/cm_driver.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-'''
-  Load the dds driver and support file if needed
-
-'''
-__version__ = "$Revision: 0.1 $"
-__date__ = "$Date: 2011/6/27 $"
-
-import os, pdb
-
-#pdb.set_trace()
-
-# Global variables
-meta_data_file=".metadata_infra"
-dds_test_infra_tarball="dds_test_infra.tar.gz"
-
-this_file_full_path=os.path.abspath(__file__)
-this_file_dirname=os.path.dirname(this_file_full_path)
-meta_data_file_full_path=os.path.join(this_file_dirname, meta_data_file)
-dds_test_infra_tarball_full_path="%s/../lib/%s" % (this_file_dirname,dds_test_infra_tarball)
-
-need_reload=False
-file_change_time = str(os.path.getmtime(dds_test_infra_tarball_full_path))
-view_root= os.path.abspath("%s/../../../../../" % this_file_dirname)  # script dir is 5 levels lower
-if not os.path.exists(os.path.join(view_root,"integration-test")):
-  view_root= os.path.abspath("%s/../../" % this_file_dirname)  # script dir is 5 levels lower
-if not os.path.exists(os.path.join(view_root,"integration-test")):
-  print "VIEW_ROOT %s is not correct" % view_root
-  assert False, "VIEW_ROOT %s is not correct" % view_root
-
-if not os.path.exists(meta_data_file_full_path): need_reload = True
-else: 
-  last_change_time = open(meta_data_file_full_path).readlines()[0].split("=")[-1]
-  if file_change_time != last_change_time:
-    need_reload = True
-if need_reload:
-  open(meta_data_file_full_path,"w").write("change time of %s=%s" % (meta_data_file_full_path, file_change_time))
-  # specific to the cm 
-  os.system("tar zxf %s > /dev/null" %  dds_test_infra_tarball_full_path)
-  #os.system("tar zxf %s " %  dds_test_infra_tarball_full_path)
-  integ_java_dir=os.path.join(view_root,"src/test")
-  config_dir=os.path.join(this_file_dirname,"../config")
-  os.system("cp -rf integ/java %s" % integ_java_dir)
-  os.system("rm -rf integ")
-  os.system("cp script/* %s" % this_file_dirname)
-  os.system("rm -rf script")
-  os.system("cp config/* %s" % config_dir)
-  os.system("rm -rf config")
-
-os.environ["VIEW_ROOT"]=view_root
-execfile(os.path.join(this_file_dirname,"dds_driver.py"))
-

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/script/dds_driver.py
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/script/dds_driver.py b/helix-agent/src/main/scripts/integration-test/script/dds_driver.py
deleted file mode 100755
index 886c769..0000000
--- a/helix-agent/src/main/scripts/integration-test/script/dds_driver.py
+++ /dev/null
@@ -1,1098 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-'''
-  Start and stop dbus2 servers, consumers
-  Will handle remote run in the future
-
-  bootstrap_relay  start/stop
-  bootstrap_producer  start/stop
-  bootstrap_server  start/stop
-  bootstrap_consumer  start/stop, stop_scn, stop_after_secs
-  profile_relay
-  profile_consumer
-  
-  zookeeper  start/stop/wait_exist/wait_no_exist/wait_value/cmd
-$SCRIPT_DIR/dbus2_driver.py -c zookeeper -o start --zookeeper_server_ports=${zookeeper_server_ports}  --cmdline_props="tickTime=2000;initLimit=5;syncLimit=2" --zookeeper_cmds=<semicolon-separated list of commands> --zookeeper_path= zookeeper_value=
-  -. start, parse the port, generate the local file path in var/work/zookeeper_data/1, start, port default from 2181, generate log4j file
-  -. stop, find the process id, id is port - 2181 + 1, will stop all the processes
-  -. wait, query client and get the status 
-  -. execute the cmd
-
-'''
-__version__ = "$Revision: 0.1 $"
-__date__ = "$Date: 2010/11/16 $"
-
-import sys, os, fcntl
-import pdb
-import time, copy, re
-from optparse import OptionParser, OptionGroup
-import logging
-import threading
-import pexpect
-from utility import *
-import distutils.dir_util
-        
-# Global variables
-options=None
-server_host="localhost"
-server_port="8080"
-consumer_host="localhost"
-consumer_port=8081
-consumer_http_start_port=8081     # may need to be changed?
-consumer_jmx_service_start_port=10000     # may need to be changed?
-rmi_registry_port="1099"
-log_file_pattern="%s_%s_%s_%s.%s.log"  # testname, component, oper, time, pid
-#stats_cmd_pattern='''jps | grep %%s | awk '{printf "open "$1"\\nbean com.linkedin.databus2:relayId=1408230481,type=OutboundTrafficTotalStats\\nget *"}' | java -jar %s/../lib/jmxterm-1.0-alpha-4-uber.jar -i -n''' % get_this_file_dirname()
-stats_cmd_pattern='''jps -J-Xms5M -J-Xmx5M | grep %%s | awk '{printf "open "$1"\\nbean com.linkedin.databus2:relayId=1408230481,type=OutboundTrafficTotalStats\\nget *"}' | java -jar %s/../lib/jmxterm-1.0-alpha-4-uber.jar -i -n''' % get_this_file_dirname()
-#config_sub_cmd='''dbus2_config_sub.py''' % get_this_file_dirname()
-jmx_cli = None
-
-def zookeeper_opers(oper):
-    if options.zookeeper_reset: zookeeper_opers_stop()
-    zookeeper_setup(oper)
-    globals()["zookeeper_opers_%s" % oper]()
-
-def conf_and_deploy(ant_file):
-    ''' to deploy a service only: substitute the cmdline options, run
-        exploded-war and build-app-conf to change the conf, then deploy.only
-    '''
-    conf_and_deploy_1(ant_file)
-
-def get_stats(pattern):
-    ''' called to get stats for a process '''
-    pids = [x for x in sys_pipe_call_1("jps | grep %s" % pattern) if x]
-    if not pids: my_error("pid for component '%s' ('%s') is not find" % (options.component, pattern))
-    pid = pids[0].split()[0]
-    get_stats_1(pid, options.jmx_bean, options.jmx_attr)
-
-def wait_event(func, option=None):
-    ''' called to wait for an event '''
-    wait_event_1(func(), option)
-
-def producer_wait_event(name, func):
-    ''' called to wait for a producer event '''
-    producer_wait_event_1(name, func())
-
-def shutdown(oper="normal"):
-    pid = send_shutdown(server_host, options.http_port or server_port, oper == "force")
-    dbg_print("shutdown pid = %s" % (pid))
-    ret = wait_for_condition('not process_exist(%s)' % (pid), 120)
-
-def get_wait_timeout():
-    if options.timeout: return options.timeout
-    else: return 10
-
-def pause_resume_consumer(oper):
-    global consumer_port
-    if options.component_id: consumer_port=find_open_port(consumer_host, consumer_http_start_port, options.component_id) 
-    url = "http://%s:%s/pauseConsumer/%s" % (consumer_host, consumer_port, oper)
-    out = send_url(url).split("\n")[1]
-    dbg_print("out = %s" % out)
-    time.sleep(0.1)
-
-def get_bootstrap_db_conn_info():
-    return ("bootstrap", "bootstrap", "bootstrap")
-
-lock_tab_sql_file = tempfile.mkstemp()[1]
-def producer_lock_tab(oper):
-    dbname, user, passwd = get_bootstrap_db_conn_info()
-    if oper == "lock" or oper == "save_file":
-      qry = '''
-drop table if exists lock_stat_tab_1;
-CREATE TABLE lock_stat_tab_1 (session_id int) ENGINE=InnoDB;
-drop procedure if exists my_session_wait;
-delimiter $$
-create procedure my_session_wait()
-begin
-  declare tmp int;
-  LOOP
-   select sleep(3600) into tmp;
-  END LOOP;
-end$$
-delimiter ;
-
-set @cid = connection_id();
-insert into lock_stat_tab_1 values (@cid);
-commit;
-lock table tab_1 read local;
-call my_session_wait(); 
-unlock tables;
-'''
-      if oper == "save_file": open(lock_tab_sql_file, "w").write(qry)
-      else:
-        ret = mysql_exec_sql(qry, dbname, user, passwd)
-        print ret
-    #ret = cmd_call(cmd, options.timeout, "ERROR 2013", get_outf())
-    else:
-      ret = mysql_exec_sql_one_row("select session_id from lock_stat_tab_1", dbname, user, passwd)
-      dbg_print(" ret = %s" % ret)
-      if not ret: my_error("No lock yet")
-      session_id = ret[0]
-      qry = "kill %s" % session_id
-      ret = mysql_exec_sql(qry, dbname, user, passwd)
-
-def producer_purge_log():
-    ''' this one is deprecated. Use the cleaner instead '''
-    dbname, user, passwd = get_bootstrap_db_conn_info()
-    ret = mysql_exec_sql("select id from bootstrap_sources", dbname, user, passwd, None, True)
-    for srcid in [x[0] for x in ret]: # for each source
-      dbg_print("srcid = %s" % srcid)
-      applied_logid = mysql_exec_sql_one_row("select logid from bootstrap_applier_state", dbname, user, passwd)[0]
-      qry = "select logid from bootstrap_loginfo where srcid=%s and logid<%s order by logid limit %s" % (srcid, applied_logid, options.producer_log_purge_limit)
-      ret =  mysql_exec_sql(qry, dbname, user, passwd, None, True)
-      logids_to_purge = [x[0] for x in ret]
-      qry = ""
-      for logid in logids_to_purge: qry += "drop table if exists log_%s_%s;" % (srcid, logid)
-      mysql_exec_sql(qry, dbname, user, passwd)
-      dbg_print("logids_to_purge = %s" % logids_to_purge)
-      mysql_exec_sql("delete from bootstrap_loginfo where srcid=%s and logid in (%s); commit" % (srcid, ",".join(logids_to_purge)), dbname, user, passwd)
-
-# load the command dictionary
-parser = OptionParser(usage="usage: %prog [options]")
-execfile(os.path.join(get_this_file_dirname(),"driver_cmd_dict.py"))
-
-allowed_opers=[]
-for cmd in cmd_dict: allowed_opers.extend(cmd_dict[cmd].keys())
-allowed_opers=[x for x in list(set(allowed_opers)) if x!="default"]
-
-ct=None  # global variable for the cmd thread, used to access the subprocess
-def is_starting_component():
-  return options.operation != "default" and "%s_%s" % (options.component, options.operation) in cmd_ret_pattern
-
-# need to check pid to determine if process is dead
-# Thread and objects
-class cmd_thread(threading.Thread):
-    ''' execute one cmd in a separate thread and check its output against a pattern; the caller enforces the timeout. '''
-    def __init__ (self, cmd, ret_pattern=None, outf=None):
-      threading.Thread.__init__(self)
-      self.daemon=True      # make it daemon, does not matter if use sys.exit()
-      self.cmd = cmd
-      self.ret_pattern = ret_pattern
-      self.outf = sys.stdout
-      if outf: self.outf = outf
-      self.thread_wait_end=False
-      self.thread_ret_ok=False
-      self.subp=None
-      self.ok_to_run=True
-    def run(self):
-      self.subp = subprocess_call_1(self.cmd)
-      if not self.subp: 
-         self.thread_wait_end=True
-         return
-      # capture java call here
-      if options.capture_java_call: cmd_call_capture_java_call()     # test only remote
-      # print the pid
-      if is_starting_component():
-        java_pid_str = "## java process pid = %s\n## hostname = %s\n" % (find_java_pid(self.subp.pid), host_name_global)
-        if java_pid_str: open(options.logfile,"a").write(java_pid_str)
-        self.outf.write(java_pid_str)
-      # no block
-      fd = self.subp.stdout.fileno()
-      fl = fcntl.fcntl(fd, fcntl.F_GETFL)
-      fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
-      while (self.ok_to_run):  # on timeout the caller must be able to terminate this thread, hence the non-blocking read
-        try: line = self.subp.stdout.readline()
-        except IOError, e: 
-          time.sleep(0.1)
-          #dbg_print("IOError %s" % e)
-          continue
-        dbg_print("line = %s" % line)
-        if not line: break
-        self.outf.write("%s" % line)
-        if self.ret_pattern and self.ret_pattern.search(line):
-          self.thread_ret_ok=True
-          break
-      if not self.ret_pattern: self.thread_ret_ok=True   # no pattern ok
-      self.thread_wait_end=True
-      # has a pattern but no match was found, so not ok
-      #while (1):  # read the rest and close the pipe
-      #  try: line = self.subp.stdout.readline()
-      #  except IOError, e:
-      #    break
-      self.subp.stdout.close()
-      # close all the file descriptors
-      #os.close(1)  # stdin
-      #os.close(2)  # stdout
-      #os.close(3)  # stderr
-      dbg_print("end of thread run")
-
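The fcntl/O_NONBLOCK trick in cmd_thread.run() is what keeps the polling loop interruptible; a minimal standalone sketch (the shell command is illustrative):

    import fcntl, os, subprocess, time

    p = subprocess.Popen(['sh', '-c', 'sleep 1; echo done'],
                         stdout=subprocess.PIPE)
    fd = p.stdout.fileno()
    fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
    while True:
        try:
            line = p.stdout.readline()
        except IOError:          # EAGAIN: no data yet, so poll again
            time.sleep(0.1)
            continue
        if not line: break       # EOF: the child closed its stdout
        print line.rstrip()      # -> done
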
-def cmd_call_capture_java_call():
-    ''' this one depends on the ivy path and ps length. may not work for all '''
-    if options.capture_java_call!="auto":
-      short_class_name=options.capture_java_call
-    else:
-      short_class_name=cmd_dict[options.component]["stop"].split("grep ")[-1].split(" ")[0]
-    ret = wait_for_condition('sys_pipe_call("ps -ef | grep java | grep -v grep | grep %s")' % short_class_name, 20)
-    java_ps_call = sys_pipe_call('ps -ef | grep "/java -d64" | grep -v grep | grep -v capture_java_call| grep %s' % short_class_name)
-    #java_ps_call = tmp_str
-    ivy_dir=get_ivy_dir()     # espresso has different ivy
-    dbg_print("ivy_dir = %s, java_ps_call=%s" % (ivy_dir,java_ps_call))
-    view_root=get_view_root()
-    class_path_list = []
-    #pdb.set_trace()
-    for jar_path in java_ps_call.split("-classpath ")[-1].split(" com.linkedin")[0].split(":"):  # classpath
-      if not jar_path: continue
-      if not re.search("(%s|%s)" % (ivy_dir,view_root),jar_path): 
-        class_path_list.append(jar_path)
-        continue
-      if re.search(ivy_dir,jar_path): 
-        sub_dir= ivy_dir
-        sub_str = "IVY_DIR"
-      if re.search(view_root,jar_path): 
-        sub_dir= view_root 
-        sub_str = "VIEW_ROOT"
-      class_path_list.append('\"%s\"' % re.sub(sub_dir,sub_str,jar_path))
-    class_path_list.sort()
-    class_path = "[\n      %s\n]" % "\n      ,".join(class_path_list)
-    class_name = java_ps_call.split(short_class_name)[0].split(" ")[-1] + short_class_name
-#cmd_direct_call={
-    print '''
-  ,"%s":
-   {
-    "class_path":%s
-  ,"class_name":"%s"
-   }
-''' % (options.component, class_path, class_name)
-#}    
-
-    #dbg_print("class_path = %s, class_name = %s" % (class_path, class_name))
-    #sys.exit(0)
-
-def cmd_call(cmd, timeout, ret_pattern=None, outf=None):
-    ''' return False if timed out. timeout is in secs '''
-    #if options.capture_java_call: cmd_call_capture_java_call()     # test only remote
-    if options.operation=="stop" and options.component_id:
-      process_info = get_process_info()
-      key=get_process_info_key(options.component, options.component_id)
-      if key in process_info:
-        kill_cmd="kill -9"
-        if "stop" in cmd_dict[options.component]: 
-          kill_cmd = cmd_dict[options.component]["stop"]
-          m = re.search("^.*(kill.*)\s*$",kill_cmd)
-          if m: kill_cmd = m.group(1)
-        sys_call("%s %s" % (kill_cmd, process_info[key]["pid"]))
-        return RetCode.OK
-    global ct
-    ct = cmd_thread(cmd, ret_pattern, outf)
-    ct.start()
-    sleep_cnt = 0
-    sleep_interval = 0.5
-    ret = RetCode.TIMEOUT
-    while (sleep_cnt * sleep_interval < timeout):
-      if ct.thread_wait_end or (ct.subp and not process_exist(ct.subp.pid)): 
-        print "end"
-        if ct.thread_ret_ok: ret = RetCode.OK  # include find pattern or no pattern given
-        else: ret= RetCode.ERROR
-        if options.save_process_id:
-          id = options.component_id and options.component_id or 0
-          save_process_info(options.component, str(id), None, options.logfile)  # no port of cm
-        #if options.capture_java_call: cmd_call_capture_java_call()
-        break    # done
-      time.sleep(sleep_interval)
-      sleep_cnt += 1
-    while (not ct.thread_wait_end):
-      ct.ok_to_run = False  # terminate the thread in timeout case
-      time.sleep(0.1)
-    return ret
-
-remote_component=None
-remote_cmd_template='''ssh %s "bash -c 'source /export/home/eng/dzhang/bin/jdk6_env; cd %s; %s'"'''
-def run_cmd_remote_setup():
-    print "!!! REMOTE RUN ENABLED !!!"
-    global remote_component
-    component_cnt = 0
-    # find the one in the cfg file, so multiple consumers must be in sequence
-    for section in remote_run_config:
-      if re.search(options.component, section): 
-        remote_component=section
-        component_cnt +=1
-        if not options.component_id or component_cnt == options.component_id: break
-    if not remote_component: my_error("No section for component %s, id %s" % (options.component, options.component_id))
-    remote_component_properties = remote_run_config[remote_component]
-    set_remote_view_root(remote_component_properties["view_root"])
-    # create the remote var/work dir, may not be needed as the current view have them
-    #sys_call("ssh %s mkdir -p %s %s" % remote_run_config[remote_component]["host"], get_remote_work_dir(), get_remote_var_dir()
-   
-def run_cmd_remote(cmd):
-    ret = remote_cmd_template % (remote_run_config[remote_component]["host"], get_remote_view_root(),  cmd)
-    return ret
-
-
-run_cmd_added_options=[]
-def run_cmd_add_option(cmd, option_name, value=None, check_exist=False):
-    global direct_java_call_jvm_args
-    dbg_print("option_name = %s, value = %s" % (option_name, value))
-    #option_name = option_name.split(".")[-1]  # get rid of the options., which is for readability only
-    if option_name not in dir(options): my_error("invalid option name %s" % option_name)
-    global run_cmd_added_options
-    run_cmd_added_options.append(option_name)
-    if not getattr(options, option_name): return cmd  # no such option
-    if not value: value = getattr(options,option_name) 
-    dbg_print("after option_name = %s, value = %s" % (option_name, value))
-    #pdb.set_trace()
-    if check_exist:
-      full_path = file_exists(value)
-      if not full_path: my_error("File does not exists! %s" % value)
-      value=full_path
-    is_jvm_option = re.search("jvm_",option_name)
-    if isinstance(value, str) and value[0]!='"' and not (option_name in ["cmdline_args"] or is_jvm_option) and options.enable_direct_java_call:   # do not quote the cmdline args
-      #value = value.replace(' ','\\ ')     # escape the white space
-      value = '"%s"' % value   # quote it 
-    if options.enable_direct_java_call:
-      option_mapping = direct_java_call_option_mapping
-      option_prefix = ""
-      option_assign = ""
-      if is_jvm_option or option_name in direct_java_call_jvm_args:  # must start with jvm
-        #pdb.set_trace()
-        direct_java_call_jvm_args[option_name][1]=value  # override the default value
-        dbg_print("direct_java_call_jvm_args[%s]=%s" % (option_name,direct_java_call_jvm_args[option_name]))
-        return cmd
-    else:
-      option_mapping = ant_call_option_mapping
-      option_prefix = "-D"
-      option_assign = "="
-    option_mapping_name = option_name # default same as the option name
-    if option_name in option_mapping: option_mapping_name = option_mapping[option_name]
-    option_str = option_prefix + option_mapping_name + option_assign + value
-    dbg_print("option_str = %s" % (option_str))
-    if not option_str: return cmd
-    cmd_split=cmd.split()
-    if options.enable_direct_java_call: # add option to the end
-      cmd += " %s" % option_str     
-    else:
-      cmd_split.insert(len(cmd_split)-1,option_str) # here it handles insert before the last one
-      cmd = " ".join(cmd_split)
-    dbg_print("cmd = %s" % cmd)
-    return cmd
-    
-def run_cmd_add_log_file(cmd):
-    global options
-    if options.logfile: log_file = options.logfile 
-    else: log_file= log_file_pattern % (options.testname, options.component, options.operation, time.strftime('%y%m%d_%H%M%S'), os.getpid())
-    #log_file = os.path.join(remote_run and get_remote_log_dir() or get_log_dir(), log_file)
-    # TODO: maybe we want to put the logs in the remote host
-    log_file = os.path.join(get_log_dir(), log_file)
-    dbg_print("log_file = %s" % log_file)
-    options.logfile = log_file
-    open(log_file,"w").write("TEST_NAME=%s\n" % options.testname) 
-    # logging for all the command
-    cmd += " 2>&1 | tee -a %s" % log_file 
-    return cmd
-
-def run_cmd_get_return_pattern():
-    ret_pattern = None
-    pattern_key = "%s_%s" % (options.component, options.operation)
-    if pattern_key in cmd_ret_pattern: ret_pattern = cmd_ret_pattern[pattern_key]
-    if options.wait_pattern: ret_pattern = re.compile(options.wait_pattern)
-    dbg_print("ret_pattern = %s" % ret_pattern)
-    return ret_pattern
-
-def run_cmd_setup():
-    if re.search("_consumer",options.component): 
-      global consumer_host
-      if remote_run: consumer_host = remote_component_properties["host"]
-      else: consumer_host = "localhost"
-      dbg_print("consumer_host= %s" % consumer_host)
-
-# need to remove from ant_call_option_mapping and run_cmd_add_option to avoid invalid option name 
-def run_cmd_add_config(cmd):
-    if options.operation in ["start","clean_log","default"]: 
-      if options.enable_direct_java_call:
-        pass_down_options=direct_java_call_option_mapping.keys()
-        pass_down_options.extend(direct_java_call_jvm_args.keys())
-        #pass_down_options.extend(direct_java_call_jvm_args_ordered)
-      else:
-        pass_down_options=ant_call_option_mapping.keys()
-      #option_mapping = options.enable_direct_java_call and direct_java_call_option_mapping or ant_call_option_mapping
-      #if options.enable_direct_java_call: pass_down_options.append("jvm_args")
-      if options.config: 
-        if not remote_run: 
-          cmd = run_cmd_add_option(cmd, "config", options.config, check_exist=True)      # check exist will figure out
-        else: 
-          cmd = run_cmd_add_option(cmd, "config", os.path.join(get_remote_view_root(), options.config), check_exist=False)  
-      run_cmd_view_root = remote_run and get_remote_view_root() or get_view_root()
-      #cmd = run_cmd_add_option(cmd, "dump_file", options.dump_file and os.path.join(run_cmd_view_root, options.dump_file) or None)
-      #cmd = run_cmd_add_option(cmd, "value_file", options.value_file and os.path.join(run_cmd_view_root, options.value_file) or None)
-      #cmd = run_cmd_add_option(cmd, "log4j_file", options.log4j_file and os.path.join(run_cmd_view_root, options.log4j_file) or None)
-      #cmd = run_cmd_add_option(cmd, "jvm_direct_memory_size")
-      #cmd = run_cmd_add_option(cmd, "jvm_max_heap_size")
-      #cmd = run_cmd_add_option(cmd, "jvm_gc_log")
-      #cmd = run_cmd_add_option(cmd, "jvm_args")
-      #cmd = run_cmd_add_option(cmd, "db_config_file")
-      #cmd = run_cmd_add_option(cmd, "cmdline_props")
-#      cmd = run_cmd_add_option(cmd, "filter_conf_file")
-
-      if options.checkpoint_dir: 
-         if options.checkpoint_dir == "auto":
-           checkpoint_dir = os.path.join(get_work_dir(), "databus2_checkpoint_%s_%s" % (time.strftime('%y%m%d_%H%M%S'), os.getpid()))
-         else:
-           checkpoint_dir = options.checkpoint_dir
-         checkpoint_dir = os.path.join(run_cmd_view_root, checkpoint_dir)  # run_cmd_view_root is a string, not a callable
-         cmd = run_cmd_add_option(cmd, "checkpoint_dir", checkpoint_dir)   
-         # clear up the directory
-         if not options.checkpoint_keep and os.path.exists(checkpoint_dir): distutils.dir_util.remove_tree(checkpoint_dir)
-
-      # options can be changed during remote run
-      if remote_run: 
-        remote_component_properties = remote_run_config[remote_component]
-        if not options.relay_host and "relay_host" in remote_component_properties: options.relay_host = remote_component_properties["relay_host"]
-        if not options.relay_port and "relay_port" in remote_component_properties: options.relay_port = remote_component_properties["relay_port"]
-        if not options.bootstrap_host and "bootstrap_host" in remote_component_properties: options.bootstrap_host = remote_component_properties["bootstrap_host"]
-        if not options.bootstrap_port and "bootstrap_port" in remote_component_properties: options.bootstrap_port = remote_component_properties["bootstrap_port"]
-      #cmd = run_cmd_add_option(cmd, "relay_host")
-      #cmd = run_cmd_add_option(cmd, "relay_port")
-      #cmd = run_cmd_add_option(cmd, "bootstrap_host")
-      #cmd = run_cmd_add_option(cmd, "bootstrap_port")
-      #cmd = run_cmd_add_option(cmd, "consumer_event_pattern")
-      if re.search("_consumer",options.component): 
-        # next available port
-        if options.http_port: http_port = options.http_port
-        else: http_port = next_available_port(consumer_host, consumer_http_start_port)   
-        #cmd = run_cmd_add_option(cmd, "http_port", http_port)
-        #cmd = run_cmd_add_option(cmd, "jmx_service_port", next_available_port(consumer_host, consumer_jmx_service_start_port))   
-      # this will take care of the passdown, no need for run_cmd_add_directly
-      for option in [x for x in pass_down_options if x not in run_cmd_added_options]:
-        cmd = run_cmd_add_option(cmd, option)
-
-
-    if options.component=="espresso-relay": cmd+= " -d " # temp hack. TODO: remove
-        
-    if options.enable_direct_java_call: 
-      #cmd = re.sub("java -classpath","java -d64 -ea %s -classpath" % " ".join([x[0]+x[1] for x in [direct_java_call_jvm_args[y] for y in direct_java_call_jvm_args_ordered] if x[1]]) ,cmd) # d64 here
-      cmd = re.sub("java -classpath","java -d64 -ea %s -classpath" % " ".join([x[0]+x[1] for x in direct_java_call_jvm_args.values() if x[1]]) ,cmd) # d64 here
-    dbg_print("cmd = %s" % cmd)
-    return cmd
-
-def run_cmd_add_ant_debug(cmd): 
-    if re.search("^ant", cmd): cmd = re.sub("^ant","ant -d", cmd)
-    dbg_print("cmd = %s" % cmd)
-    return cmd
-
-def run_cmd_save_cmd(cmd):
-    if not options.logfile: return
-    re_suffix = re.compile("\.\w+$")
-    if re_suffix.search(options.logfile): command_file = re_suffix.sub(".sh", options.logfile)  
-    else: command_file = "%s.sh" % options.logfile 
-    dbg_print("command_file = %s" % command_file)
-    open(command_file,"w").write("%s\n" % cmd)
-
-def run_cmd_restart(cmd):
-    ''' restart using a previous .sh file ''' 
-    if not options.logfile: return cmd
-    previous_run_sh_pattern = "%s_*.sh" % "_".join(options.logfile.split("_")[:-3])
-    import glob
-    previous_run_sh = glob.glob(previous_run_sh_pattern)
-    my_warning("No previous run files. Cannot restart. Start with new options.")
-    if not previous_run_sh: return cmd
-    previous_run_sh.sort()
-    run_sh = previous_run_sh[-1]
-    print "Use previous run file %s" % run_sh
-    lines = open(run_sh).readlines()
-    cmd = lines[0].split("2>&1")[0]
-    return cmd
-
-def run_cmd_direct_java_call(cmd, component): 
-    ''' this needs to stay consistent with run_cmd_add_option;
-        it rewrites the "ant -f" invocation and will mess up if extra options are present
-    ''' 
-
-    if component not in cmd_direct_call:
-      options.enable_direct_java_call = False   # disable direct java call if classpath not given
-      return cmd
-    #if re.search("^ant", cmd): # only component in has class path given will be 
-    #if True: # every thing
-    if re.search("ant ", cmd): # only component in has class path given will be 
-      ivy_dir = get_ivy_dir()
-      view_root = get_view_root()
-      class_path_list=[]
-      for class_path in cmd_direct_call[component]["class_path"]:
-        if re.search("IVY_DIR",class_path): 
-          class_path_list.append(re.sub("IVY_DIR", ivy_dir,class_path))
-          continue
-        if re.search("VIEW_ROOT",class_path): 
-          class_path_list.append(re.sub("VIEW_ROOT", view_root,class_path))
-          if not os.path.exists(class_path_list[-1]): # some jars not in VIEW_ROOT, trigger before command
-            if "before_cmd" in cmd_direct_call[component]: 
-              before_cmd = "%s; " % cmd_direct_call[component]["before_cmd"]
-              sys_call(before_cmd)
-          continue
-        class_path_list.append(class_path)
-      if options.check_class_path: 
-        for jar_file in class_path_list: 
-          if not os.path.exists(jar_file): 
-            print "==WARNING NOT EXISTS: " + jar_file
-            new_jar_path = sys_pipe_call("find %s -name %s" % (ivy_dir, os.path.basename(jar_file))).split("\n")[0]
-            if new_jar_path: 
-              print "==found " + new_jar_path
-            class_path_list[class_path_list.index(jar_file)] = new_jar_path
-      direct_call_cmd = "java -classpath %s %s" % (":".join(class_path_list), cmd_direct_call[component]["class_name"])
-      if re.search("ant .*;",cmd): cmd = re.sub("ant .*;","%s" % direct_call_cmd, cmd)
-      else: cmd = re.sub("ant .*$",direct_call_cmd, cmd)
-    dbg_print("cmd = %s" % cmd)
-    return cmd
-
-def run_cmd():
-    if (options.component=="bootstrap_dbreset"): setup_rmi("stop")
-    if (not options.operation): options.operation="default"
-    if (not options.testname): 
-      options.testname = "TEST_NAME" in os.environ and os.environ["TEST_NAME"] or "default"
-    if (options.operation not in cmd_dict[options.component]): 
-      my_error("%s is not one of the command for %s. Valid values are %s " % (options.operation, options.component, cmd_dict[options.component].keys()))
-    # handle the different connection string for hudson
-    if (options.component=="db_relay" and options.db_config_file): 
-       options.db_config_file = db_config_change(options.db_config_file)
-    if (options.component=="test_bootstrap_producer" and options.operation=="lock_tab"): 
-      producer_lock_tab("save_file")
-    cmd = cmd_dict[options.component][options.operation]  
-    # cmd can be a function call
-    if isinstance(cmd, list): 
-      if not callable(cmd[0]): my_error("First element should be function")
-      cmd[0](*tuple(cmd[1:]))        # call the function
-      return
-    if options.enable_direct_java_call: cmd = run_cmd_direct_java_call(cmd, options.component)
-    if remote_run: run_cmd_remote_setup()
-    if options.ant_debug: cmd = run_cmd_add_ant_debug(cmd) # need ant debug call or not
-    cmd = run_cmd_add_config(cmd) # handle config file
-    if remote_run: cmd = run_cmd_remote(cmd) 
-    ret_pattern = run_cmd_get_return_pattern()
-    if options.restart: cmd = run_cmd_restart(cmd)
-    cmd = run_cmd_add_log_file(cmd)
-    if is_starting_component(): run_cmd_save_cmd(cmd)
-    ret = cmd_call(cmd, options.timeout, ret_pattern, get_outf())
-    if options.operation == "stop": time.sleep(0.1)
-    return ret
-
-def setup_rmi_cond(oper):
-    rmi_up = isOpen(server_host, rmi_registry_port)
-    dbg_print("rmi_up = %s" % rmi_up)
-    if oper=="start": return rmi_up
-    if oper=="stop": return not rmi_up
-
-def setup_rmi(oper="start"):
-    ''' start rmi registry if not already started '''
-    ret = RetCode.OK
-    dbg_print("oper = %s" % oper)
-    rmi_up = isOpen(server_host, rmi_registry_port)
-    rmi_str = "ant -f sitetools/rmiscripts/build.xml; ./rmiservers/bin/rmiregistry%s" % oper
-    if oper=="stop": sys_call(kill_cmd_template % "RegistryImpl")  # make sure it stops
-    if (oper=="start" and not rmi_up) or (oper=="stop" and rmi_up):
-      sys_call(rmi_str)
-      # wait for rmi
-      ret = wait_for_condition('setup_rmi_cond("%s")' % oper)
-
-def setup_env():
-    #setup_rmi()
-    pass
-
-def get_outf():
-    outf = sys.stdout
-    if options.output: outf = open(options.output,"w")
-    return outf
-
-def start_jmx_cli():
-    global jmx_cli
-    if not jmx_cli:
-      jmx_cli = pexpect.spawn("java -jar %s/../lib/jmxterm-1.0-alpha-4-uber.jar" % get_this_file_dirname())
-      jmx_cli.expect("\$>")
-
-def stop_jmx_cli():
-    global jmx_cli
-    if jmx_cli:
-      jmx_cli.sendline("quit")
-      jmx_cli.expect(pexpect.EOF)
-      jmx_cli = None
-
-def jmx_cli_cmd(cmd):
-    if not jmx_cli: start_jmx_cli()
-    dbg_print("jmx cmd = %s" % cmd)
-    jmx_cli.sendline(cmd)
-    jmx_cli.expect("\$>")
-    ret = jmx_cli.before.split("\r\n")[1:]
-    dbg_print("jmx cmd ret = %s" % ret)
-    return ret
-
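The jmx_cli helpers are a plain pexpect send/expect loop around an interactive CLI; roughly the same pattern works against any prompt-driven program (the program, prompt, and echo handling here are illustrative):

    import pexpect

    child = pexpect.spawn('python')      # any REPL-style program
    child.expect('>>> ')                 # wait for the prompt
    child.sendline('1 + 1')
    child.expect('>>> ')                 # output up to the prompt is in .before
    print child.before.split('\r\n')[1]  # -> 2 (line 0 is the echoed input)
    child.sendline('exit()')
    child.expect(pexpect.EOF)
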
-def get_stats_1(pid, jmx_bean, jmx_attr):
-    outf = get_outf()
-    start_jmx_cli()
-    jmx_cli_cmd("open %s" % pid)
-    ret = jmx_cli_cmd("beans")
-    if jmx_bean=="list": 
-      stat_re = re.compile("^com.linkedin.databus2:")
-      stats = [x for x in ret if stat_re.search(x)]
-      outf.write("%s\n" % "\n".join(stats))
-      return
-    stat_re = re.compile("^com.linkedin.databus2:.*%s$" % jmx_bean)
-    stats = [x for x in ret if stat_re.search(x)]
-    if not stats: # stats not found
-      stat_re = re.compile("^com.linkedin.databus2:")
-      stats = [x.split("=")[-1].rstrip() for x in ret if stat_re.search(x)]
-      my_error("Possible beans are %s" % stats)
-    full_jmx_bean = stats[0] 
-    jmx_cli_cmd("bean %s" % full_jmx_bean)
-    if jmx_attr == "all": jmx_attr = "*"
-    ret = jmx_cli_cmd("get %s" % jmx_attr)
-    outf.write("%s\n" % "\n".join(ret))
-    stop_jmx_cli()
-
-def run_testcase(testcase):
-    dbg_print("testcase = %s" % testcase)
-    os.chdir(get_testcase_dir()) 
-    if not re.search("\.test$", testcase): testcase += ".test"
-    if not os.path.exists(testcase): 
-      my_error("Test case %s does not exist" % testcase)
-    dbg_print("testcase = %s" % testcase)
-    ret = sys_call("/bin/bash %s" % testcase)
-    os.chdir(view_root)
-    return ret
-
-def get_ebuf_inbound_total_maxStreamWinScn(host, port, option=None):
-    url_template = "http://%s:%s/containerStats/inbound/events/total"    
-    if option == "bootstrap":
-       url_template = "http://%s:%s/clientStats/bootstrap/events/total"
-    return http_get_field(url_template, host, port, "maxSeenWinScn")
-
-def consumer_reach_maxStreamWinScn(maxWinScn, host, port, option=None):
-    consumerMaxWinScn = get_ebuf_inbound_total_maxStreamWinScn(host, port, option)
-    dbg_print("consumerMaxWinScn = %s, maxWinScn = %s" % (consumerMaxWinScn, maxWinScn))
-    return consumerMaxWinScn >= maxWinScn
-
-def producer_reach_maxStreamWinScn(name, maxWinScn):
-    ''' select max of all the sources '''
-    dbname, user, passwd = get_bootstrap_db_conn_info()
-    tab_name = (name == "producer") and "bootstrap_producer_state" or "bootstrap_applier_state"
-    qry = "select max(windowscn) from %s " % tab_name
-    ret = mysql_exec_sql_one_row(qry, dbname, user, passwd)
-    producerMaxWinScn = ret and ret[0] or 0   # 0 if no rows
-    dbg_print("producerMaxWinScn = %s, maxWinScn = %s" % (producerMaxWinScn, maxWinScn))
-    return producerMaxWinScn >= maxWinScn
-
-def wait_for_condition(cond, timeout=60, sleep_interval = 0.1):
-    ''' wait for a certain cond; cond is an expression string that gets eval'ed.
-       This cannot live in utility because eval() must see the functions cond references '''
-    dbg_print("cond = %s" % cond)
-    sleep_cnt = 0
-    ret = RetCode.TIMEOUT
-    while (sleep_cnt * sleep_interval < timeout):
-      if eval(cond): 
-        ret = RetCode.OK
-        break
-      time.sleep(sleep_interval)
-      sleep_cnt += 1
-    return ret
-
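Because wait_for_condition() eval()s the condition string in this module, callers pass expressions over names visible here; a minimal illustrative use:

    # Poll every 100ms, up to 5s, for a marker file (path is illustrative).
    marker = '/tmp/driver.done'
    ret = wait_for_condition('os.path.exists("%s")' % marker, timeout=5)
    if ret == RetCode.TIMEOUT: print 'marker never appeared'
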
-def producer_wait_event_1(name, timeout):
-    ''' options.relay_host should be set for remote_run '''
-    relay_host = options.relay_host and options.relay_host or server_host
-    relay_port = options.relay_port and options.relay_port or server_port
-    if options.sleep_before_wait: time.sleep(options.sleep_before_wait)
-    maxWinScn = get_ebuf_inbound_total_maxStreamWinScn(relay_host, relay_port)
-    dbg_print("maxWinScn = %s, timeout = %s" % (maxWinScn, timeout))
-    ret = wait_for_condition('producer_reach_maxStreamWinScn("%s", %s)' % (name,maxWinScn), timeout)
-    if ret == RetCode.TIMEOUT: print "Timed out waiting consumer to reach maxWinScn %s" % maxWinScn
-    return ret
-
-def send_shutdown(host, port, force=False):
-    ''' use kill which is much faster '''
-    #url_template = "http://%s:%s/operation/shutdown" 
-    url_template = "http://%s:%s/operation/getpid" 
-    pid = http_get_field(url_template, host, port, "pid")
-    force_str = force and "-9" or ""
-    sys_call("kill %s %s" % (force_str,pid))
-    return pid
-
-def wait_event_1(timeout, option=None):
-    relay_host = options.relay_host and options.relay_host or server_host
-    relay_port = options.relay_port and options.relay_port or server_port
-    maxWinScn = get_ebuf_inbound_total_maxStreamWinScn(relay_host, relay_port)
-    print "Wait maxWinScn:%s" % maxWinScn
-    dbg_print("maxWinScn = %s, timeout = %s" % (maxWinScn, timeout))
-    # consumer host is defined already
-    global consumer_port
-    if options.component_id: consumer_port=find_open_port(consumer_host, consumer_http_start_port, options.component_id) 
-    if options.http_port: consumer_port = options.http_port
-    ret = wait_for_condition('consumer_reach_maxStreamWinScn(%s, "%s", %s, "%s")' % (maxWinScn, consumer_host, consumer_port, option and option or ""), timeout)
-    if ret == RetCode.TIMEOUT: print "Timed out waiting consumer to reach maxWinScn %s" % maxWinScn
-    if options.sleep_after_wait: time.sleep(options.sleep_after_wait)
-    return ret
-
-def conf_and_deploy_1_find_dir_name(ant_target, screen_out):
-    found_target = False
-    copy_file_re = re.compile("\[copy\] Copying 1 file to (.*)")
-    for line in screen_out:
-      if not found_target and line == ant_target: found_target = True
-      if found_target:
-         dbg_print("line = %s" % line)
-         m = copy_file_re.search(line) 
-         if m: return m.group(1)
-    return None
-
-def conf_and_deploy_1_find_extservice(dir_name):
-    extservice_re = re.compile("extservices.*\.springconfig")
-    flist = os.listdir(dir_name)
-    flist.sort(reverse=True)
-    for fname in flist:
-      if extservice_re.search(fname): return os.path.join(dir_name, fname)
-    return None
-
-def conf_and_deploy_1_find_extservice_name(ant_target, screen_out):
-    found_target = False
-    copy_file_re = re.compile("\[copy\] Copying (\S*) to ")
-    for line in screen_out:
-      if not found_target and line == ant_target: found_target = True
-      if found_target:
-         dbg_print("line = %s" % line)
-         m = copy_file_re.search(line) 
-         if m: return m.group(1)
-    return None
-
-
-from xml.dom.minidom import parse
-from xml.dom.minidom import Element
-def conf_and_deploy_1_add_conf(file_name):
-    dom1 = parse(file_name)
-    map_element=[x for x in dom1.getElementsByTagName("map")][0]
-    for prop in options.extservice_props: 
-      #props = prop.split(";")
-      props = prop.split("=")
-      len_props = len(props)
-      if len_props not in (2,3): 
-        print "WARNING: prop %s is not a valid setting. IGNORED" % prop
-        continue
-      is_top_level= (len_props == 2)
-      find_keys=[x for x in dom1.getElementsByTagName("entry") if x.attributes["key"].value == props[0]]
-      dbg_print("find_keys = %s" % find_keys)
-      if not find_keys: 
-        print "WARNING: prop %s part %s is not in file %s. " % (prop, props[0], file_name)
-        if is_top_level:  # only add when is top level
-          print "WARNING: prop %s part %s is added to file %s. " % (prop, props[0], file_name)
-          new_entry=Element("entry")
-          new_entry.setAttribute("key", props[0])
-          new_entry.setAttribute("value", props[1])
-          map_element.appendChild(new_entry)
-        continue
-      keyNode = find_keys[0] 
-      if is_top_level: 
-        keyNode.attributes["value"].value=props[-1]
-        continue
-      find_props= [x for x in keyNode.getElementsByTagName("prop") if x.attributes["key"].value == props[1]]
-      dbg_print("find_props = %s" % find_props)
-      if not find_props: 
-        print "WARNING: prop %s part %s is not in file %s. IGNORED" % (prop, props[1], file_name)
-        continue
-      find_props[0].childNodes[0].nodeValue=props[-1]
-    open(file_name,"w").write(dom1.toxml())
-
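-# For reference, a sketch of the springconfig shape the function above walks,
-# with illustrative (not real) keys: a 2-part prop edits an <entry> value
-# directly, while a 3-part prop edits a nested <prop> under the entry:
-#
-#   <map>
-#     <entry key="top.level.key" value="..."/>
-#     <entry key="databus2.relay.local.bizfollow">
-#       <prop key="db.bizfollow.db_url">jdbc..</prop>
-#     </entry>
-#   </map>
-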
-def conf_and_deploy_1(ant_file):
-    ''' to deploy a service only: run exploded-war first,
-        then build-app-conf; substitute the extservice_props into the extservice file;
-        then run deploy.only.noconf to deploy the service using the new conf
-    '''
-    #pdb.set_trace()
-    #out = sys_pipe_call("ant -f %s build-app-conf" % (ant_file))
-    #dir_name = conf_and_deploy_1_find_dir_name("build-app-conf:", out.split("\n"))
-    tmp_file = tempfile.mkstemp()[1]
-    cmd = "ant -f %s exploded-war 2>&1 | tee %s" % (ant_file, tmp_file)
-    ret = cmd_call(cmd, 60, re.compile("BUILD SUCCESSFUL"))
-    cmd = "ant -f %s build-app-conf 2>&1 | tee %s" % (ant_file, tmp_file)
-    ret = cmd_call(cmd, 5, re.compile("BUILD SUCCESSFUL"))
-    dir_name = conf_and_deploy_1_find_dir_name("build-app-conf:", [x.rstrip() for x in open(tmp_file).readlines()])
-    dbg_print("dir_name = %s" % dir_name)
-    if dir_name: extservice_file_name = conf_and_deploy_1_find_extservice(dir_name)
-    if not dir_name or not extservice_file_name: my_error("No extservice file in dir %s" % dir_name)
-    #out = sys_pipe_call("ant -f %s -d build-app-conf" % (ant_file))
-    #extservice_file_name = conf_and_deploy_1_find_extservice_name("build-app-conf:", out.split("\n"))
-    dbg_print("extservice_file_name = %s" % extservice_file_name)
-    if options.extservice_props: 
-      tmp_files = [extservice_file_name]
-      tmp_files = save_copy([extservice_file_name])
-      dbg_print("new_files = %s" % tmp_files)
-      conf_and_deploy_1_add_conf(extservice_file_name)
-      #shutil.copy(tmp_files[0], extservice_file_name)
-    # do the deploy
-    #pdb.set_trace()
-    cmd = "ant -f %s deploy.only.noconf 2>&1 | tee %s" % (ant_file, tmp_file)
-    ret = cmd_call(cmd, 60, re.compile("BUILD SUCCESSFUL"))
-
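-# The same three-step flow driven by hand, with an illustrative build file
-# path (any of the per-service run/build.xml files would do):
-#
-#   ant -f espresso-router/run/build.xml exploded-war
-#   ant -f espresso-router/run/build.xml build-app-conf
-#   # ... patch the generated extservices*.springconfig ...
-#   ant -f espresso-router/run/build.xml deploy.only.noconf
-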
-zookeeper_cmd=None
-zookeeper_server_ports=None
-zookeeper_server_dir=None
-zookeeper_server_ids=None
-
-#possible_ivy_dir=[os.path.join(os.environ["HOME"],".ivy2/lin-cache/ivy-cache"),os.path.join(os.environ["HOME"],".ivy2/lin-cache"),"/ivy/.ivy2/ivy-cache","/ivy/.ivy2"]
-#possible_ivy_dir=[os.path.join(os.environ["HOME"],".m2/repository"), os.path.join(os.environ["HOME"],".ivy2/lin-cache/"),"/ivy/.ivy2"]
-def get_ivy_dir():
-    for ivy_dir in possible_ivy_dir:
-      if os.path.exists(ivy_dir): break
-    if not os.path.exists(ivy_dir):
-      raise RuntimeError("no ivy/maven cache dir found among %s" % possible_ivy_dir)
-    return ivy_dir
- 
-def zookeeper_setup(oper):
-    ''' may need to do a find later. find $HOME/.ivy2/lin-cache -name zookeeper-3.3.0.jar '''
-    global zookeeper_cmd, zookeeper_server_ports, zookeeper_server_dir, zookeeper_server_ids, zookeeper_classpath
-    #possible_ivy_home_dir=[os.path.join(os.environ["HOME"],".ivy2/lin-cache/"),"/ivy/.ivy2"]
-    possible_ivy_home_dir=[os.path.join(os.environ["HOME"],".m2/repository/"), os.path.join(os.environ["HOME"],".ivy2/lin-cache/"),"/ivy/.ivy2"]
-    ivy_dir = get_ivy_dir()
-    zookeeper_class= (oper=="start") and  "org.apache.zookeeper.server.quorum.QuorumPeerMain" or "org.apache.zookeeper.ZooKeeperMain"
-    log4j_file=os.path.join(get_view_root(),"integration-test/config/zookeeper-log4j2file.properties")
-    dbg_print("zookeeper_classpath = %s" % zookeeper_classpath)
-    if not "zookeeper_classpath" in globals(): 
-      zookeeper_classpath="IVY_DIR/org/apache/zookeeper/zookeeper/3.3.0/zookeeper-3.3.0.jar:IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-    if re.search("IVY_DIR",zookeeper_classpath): zookeeper_classpath=re.sub("IVY_DIR", ivy_dir,zookeeper_classpath)
-    if re.search("VIEW_ROOT",zookeeper_classpath): zookeeper_classpath=re.sub("VIEW_ROOT", view_root,zookeeper_classpath)
-    run_cmd_add_option("", "config", options.config, check_exist=True)      #  just add the jvm args
-    zookeeper_cmd="java -d64 -Xmx512m -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.port=%%s -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dlog4j.configuration=file://%s %s -cp %s %s" % (log4j_file, " ".join([x[0]+x[1] for x in direct_java_call_jvm_args.values() if x[1]]), zookeeper_classpath, zookeeper_class)
-    dbg_print("zookeeper_cmd=%s" % (zookeeper_cmd))
-    zookeeper_server_ports = options.zookeeper_server_ports or "localhost:2181"
-    zookeeper_server_dir=os.path.join(get_work_dir(),"zookeeper_data")
-    dbg_print("zookeeper_server_dir=%s" % (zookeeper_server_dir))
-    #zookeeper_server_ids= options.zookeeper_server_ids and [int(x) for x in options.zookeeper_server_ids.split(",")] or range(1,len(zookeeper_server_ports.split(","))+1)
-    zookeeper_server_ids= options.zookeeper_server_ids and [int(x) for x in options.zookeeper_server_ids.split(",")] or range(len(zookeeper_server_ports.split(",")))
-    dbg_print("zookeeper_server_ids=%s" % (zookeeper_server_ids))
-
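-# With the defaults above, zookeeper_cmd renders roughly as follows (paths
-# shortened; the remaining %s is the jmx port, filled in by
-# zookeeper_opers_start; jvm-arg order follows dict order, so it may vary):
-#
-#   java -d64 -Xmx512m -Dcom.sun.management.jmxremote ... \
-#     -Dlog4j.configuration=file://.../zookeeper-log4j2file.properties \
-#     -XX:MaxDirectMemorySize=100m -Xmx512m -Xms100m \
-#     -cp .../zookeeper-3.3.*.jar:.../log4j-1.2.15.jar \
-#     org.apache.zookeeper.server.quorum.QuorumPeerMain
-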
-def zookeeper_opers_start_create_conf(zookeeper_server_ports_split):
-    zookeeper_num_servers = len(zookeeper_server_ports_split)
-    zookeeper_server_conf_files=[]
-    zookeeper_internal_port_1_start = 2800
-    zookeeper_internal_port_2_start = 3800
-    # override the default config
-    server_conf={"tickTime":2000,"initLimit":5,"syncLimit":2,"maxClientCnxns":0}
-    if options.cmdline_props:
-      for pair in options.cmdline_props.split(";"):
-        (k, v) = pair.split("=")
-        if k in server_conf: server_conf[k] = v
-    # get the server
-    zookeeper_internal_conf=""
-    for k in server_conf: zookeeper_internal_conf+="%s=%s\n" % (k, server_conf[k])
-    dbg_print("zookeeper_internal_conf = %s" % zookeeper_internal_conf)
-    #for server_id in range(1,zookeeper_num_servers+1):
-    for server_id in range(zookeeper_num_servers):
-      zookeeper_host = zookeeper_server_ports_split[server_id].split(":")[0]
-      zookeeper_internal_port_1 = zookeeper_internal_port_1_start + server_id  
-      zookeeper_internal_port_2 = zookeeper_internal_port_2_start +  server_id 
-      if zookeeper_num_servers>1:
-        zookeeper_internal_conf += "server.%s=%s:%s:%s\n" % (server_id, zookeeper_host, zookeeper_internal_port_1, zookeeper_internal_port_2) 
-    dbg_print("zookeeper_internal_conf = %s" % zookeeper_internal_conf)
-
-    #for server_id in range(1,zookeeper_num_servers+1):
-    for server_id in range(zookeeper_num_servers):
-      if server_id not in zookeeper_server_ids: continue
-      conf_file = os.path.join(zookeeper_server_dir,"conf_%s" % server_id)
-      dataDir=os.path.join(zookeeper_server_dir,str(server_id))
-      zookeeper_port = zookeeper_server_ports_split[server_id].split(":")[1]
-      conf_file_p = open(conf_file, "w")
-      conf_file_p.write("clientPort=%s\n" % zookeeper_port)
-      conf_file_p.write("dataDir=%s\n" % dataDir)
-      conf_file_p.write("%s\n" % zookeeper_internal_conf)
-      conf_file_p.close()
-      dbg_print("==conf file %s: \n %s" % (conf_file, open(conf_file).readlines()))
-      zookeeper_server_conf_files.append(conf_file)
-    return zookeeper_server_conf_files
-
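-# A generated conf file looks roughly like this (single-server case, so no
-# server.N lines; server_conf iteration order may vary):
-#
-#   clientPort=2181
-#   dataDir=<work_dir>/zookeeper_data/0
-#   tickTime=2000
-#   initLimit=5
-#   syncLimit=2
-#   maxClientCnxns=0
-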
-def zookeeper_opers_start_create_dirs(zookeeper_server_ports_split):
-    #for server_id in range(1,len(zookeeper_server_ports_split)+1):
-    for server_id in range(len(zookeeper_server_ports_split)):
-      if server_id not in zookeeper_server_ids: continue
-      current_server_dir=os.path.join(zookeeper_server_dir,str(server_id))
-      dbg_print("current_server_dir = %s" % current_server_dir)
-      if os.path.exists(current_server_dir): 
-        if not options.zookeeper_reset: continue
-        distutils.dir_util.remove_tree(current_server_dir)
-      try: distutils.dir_util.mkpath(current_server_dir)
-      except Exception as e: print ("ERROR: Exception = %s" % e)
-      my_id_file=os.path.join(current_server_dir, "myid")
-      dbg_print("my_id_file = %s" % my_id_file)
-      open(my_id_file,"w").write("%s\n" % server_id)
-    
-def zookeeper_opers_start():
-    zookeeper_server_ports_split = zookeeper_server_ports.split(",")
-    zookeeper_opers_start_create_dirs(zookeeper_server_ports_split)
-    conf_files = zookeeper_opers_start_create_conf(zookeeper_server_ports_split)
-    cnt = 0
-    for conf_file in conf_files:
-      # no log file for now
-      #cmd = run_cmd_add_log_file(cmd)
-      search_str=len(conf_files)>1 and "My election bind port" or "binding to port"
-      cmd = "%s %s" % (zookeeper_cmd % (int(options.zookeeper_jmx_start_port) + cnt), conf_file)
-      cmd = run_cmd_add_log_file(cmd)
-      ret = cmd_call(cmd, 60, re.compile(search_str))
-      cnt +=1
-    
-def zookeeper_opers_stop():
-    # it would be better to use the pid, but somehow it is not in the datadir
-    sys_call(kill_cmd_template % "QuorumPeerMain")
-
-def zookeeper_opers_wait_for_exist():
-    pass
-def zookeeper_opers_wait_for_nonexist():
-    pass
-def zookeeper_opers_wait_for_value():
-    pass
-def zookeeper_opers_cmd():
-    if not options.zookeeper_cmds: 
-      print "No zookeeper_cmds given"
-      return
-    splitted_cmds = ";".join(["echo %s" % x for x in options.zookeeper_cmds.split(";")])
-    sys_call("(%s) | %s -server %s" % (splitted_cmds, zookeeper_cmd, zookeeper_server_ports))
- 
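-# Invocation sketch for the cmd operation (flag spellings per the option
-# definitions in main() below; commands are semicolon separated, the target
-# path "/foo" is illustrative):
-#
-#   dds_driver.py -c zookeeper -o cmd \
-#     --zookeeper_server_ports=localhost:2181 \
-#     --zookeeper_cmds="ls /;get /foo"
-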
-def main(argv):
-    # default 
-    global options
-    parser.add_option("-n", "--testname", action="store", dest="testname", default=None, help="A test name identifier")
-    parser.add_option("-c", "--component", action="store", dest="component", default=None, choices=cmd_dict.keys(),
-                       help="%s" % cmd_dict.keys())
-    parser.add_option("-o", "--operation", action="store", dest="operation", default=None, choices=allowed_opers,
-                       help="%s" % allowed_opers)
-    parser.add_option("--wait_pattern", action="store", dest="wait_pattern", default=None,
-                       help="the pattern to wait for the operation to finish")
-    parser.add_option("", "--output", action="store", dest="output", default=None,
-                       help="Output file name. Default to stdout")
-    parser.add_option("", "--logfile", action="store", dest="logfile", default=None,
-                       help="log file for both stdout and stderror. Default auto generated")
-    parser.add_option("","--timeout", action="store", type="long", dest="timeout", default=600,
-                       help="Time out in secs before waiting for the success pattern. [default: %default]")
-    parser.add_option("", "--save_process_id", action="store_true", dest="save_process_id", default = False,
-                       help="Store the process id if set.  [default: %default]")
-    parser.add_option("", "--restart", action="store_true", dest="restart", default = False,
-                       help="Restart the process using previos config if set.  [default: %default]")
- 
-    jvm_group = OptionGroup(parser, "jvm options", "")
-    jvm_group.add_option("", "--jvm_direct_memory_size", action="store", dest="jvm_direct_memory_size", default = None,
-                       help="Set the jvm direct memory size. e.g., 2048m. Default using the one driver_cmd_dict.")
-    jvm_group.add_option("", "--jvm_max_heap_size", action="store", dest="jvm_max_heap_size", default = None,
-                       help="Set the jvm max heap size. e.g., 1024m. Default using the one in driver_cmd_dict.")
-    jvm_group.add_option("", "--jvm_min_heap_size", action="store", dest="jvm_min_heap_size", default = None,
-                       help="Set the jvm min heap size. e.g., 1024m. Default using the one in driver_cmd_dict.")
-    jvm_group.add_option("", "--jvm_args", action="store", dest="jvm_args", default = None,
-                       help="Other jvm args. e.g., '-Xms24m -Xmx50m'")
-    jvm_group.add_option("", "--jvm_gc_log", action="store", dest="jvm_gc_log", default = None,
-                       help="Enable gc and give jvm gc log file")
-
-    test_case_group = OptionGroup(parser, "Testcase options", "")
-    test_case_group.add_option("", "--testcase", action="store", dest="testcase", default = None,
-                       help="Run a test. Report error. Default no test")
-
-    stats_group = OptionGroup(parser, "Stats options", "")
-    stats_group.add_option("","--jmx_bean", action="store", dest="jmx_bean", default="list",
-                       help="jmx bean to get. [default: %default]")
-    stats_group.add_option("","--jmx_att", action="store", dest="jmx_attr", default="all",
-                       help="jmx attr to get. [default: %default]")
-
-    remote_group = OptionGroup(parser, "Remote options", "")
-    remote_group.add_option("", "--remote_run", action="store_true", dest="remote_run", default = False,
-                       help="Run remotely based on config file. Default False")
-    remote_group.add_option("", "--remote_deploy", action="store_true", dest="remote_deploy", default = False,
-                       help="Deploy the source tree to the remote machine based on config file. Default False")
-    remote_group.add_option("", "--remote_config_file", action="store", dest="remote_config_file", default = None,
-                       help="Remote config file")
-
-    zookeeper_group = OptionGroup(parser, "Zookeeper options", "")
-    zookeeper_group.add_option("", "--zookeeper_server_ports", action="store", dest="zookeeper_server_ports", default = None,
-                       help="comma separated zookeeper ports, used to start/stop/connect to zookeeper")
-    zookeeper_group.add_option("", "--zookeeper_path", action="store", dest="zookeeper_path", default = None,
-                       help="the zookeeper path to wait for")
-    zookeeper_group.add_option("", "--zookeeper_value", action="store", dest="zookeeper_value", default = None,
-                       help="zookeeper path value")
-    zookeeper_group.add_option("", "--zookeeper_cmds", action="store", dest="zookeeper_cmds", default = None,
-                       help="cmds to send to zookeeper client. Comma separated ")
-    zookeeper_group.add_option("", "--zookeeper_server_ids", action="store", dest="zookeeper_server_ids", default = None,
-                       help="Comma separated list of server to start. If not given, start the number of servers in zookeeper_server_ports. This is used to start server on multiple machines ")
-    zookeeper_group.add_option("", "--zookeeper_jmx_start_port", action="store", dest="zookeeper_jmx_start_port", default = 27960,
-                       help="Starting port for jmx")
-    zookeeper_group.add_option("", "--zookeeper_reset", action="store_true", dest="zookeeper_reset", default = False,
-                       help="If true recreate server dir, otherwise start from existing server dir")
-
-
-    debug_group = OptionGroup(parser, "Debug options", "")
-    debug_group.add_option("-d", "--debug", action="store_true", dest="debug", default = False,
-                       help="debug mode")
-    debug_group.add_option("--ant_debug", action="store_true", dest="ant_debug", default = False,
-                       help="ant debug mode")
-    debug_group.add_option("--capture_java_call", action="store", dest="capture_java_call", default = None,
-                       help="capture the java call. give the class name or auto")
-    debug_group.add_option("--enable_direct_java_call", action="store_true", dest="enable_direct_java_call", default = True,
-    #debug_group.add_option("--enable_direct_java_call", action="store_true", dest="enable_direct_java_call", default = False,
-                       help="enable direct java call. ")
-    debug_group.add_option("--check_class_path", action="store_true", dest="check_class_path", default = True,
-                       help="check if class path exists. ")
-    debug_group.add_option("", "--sys_call_debug", action="store_true", dest="enable_sys_call_debug", default = False,
-                       help="debug sys call")
-
-    # load local options
-    #execfile(os.path.join(get_this_file_dirname(),"driver_local_options.py"))
-    #pdb.set_trace()
-   
-    parser.add_option_group(jvm_group)
-    parser.add_option_group(config_group)
-    parser.add_option_group(other_option_group)
-    parser.add_option_group(test_case_group)
-    parser.add_option_group(stats_group)
-    parser.add_option_group(remote_group)
-    parser.add_option_group(zookeeper_group)
-    parser.add_option_group(debug_group)
-
-    (options, args) = parser.parse_args()
-    set_debug(options.debug)
-    set_sys_call_debug(options.enable_sys_call_debug)
-    dbg_print("options = %s  args = %s" % (options, args))
-
-    arg_error=False
-    if not options.component and not options.testcase and not options.remote_deploy:
-       print("\n!!!Please give component!!!\n")
-       arg_error=True
-    if arg_error: 
-      parser.print_help()
-      parser.exit()
-    
-    if afterParsingHook: afterParsingHook(options)   # the hook to call after parsing, change options
-
-    setup_env()
-    if (not options.testname):
-      options.testname = "TEST_NAME" in os.environ and os.environ["TEST_NAME"] or "default"
-    os.environ["TEST_NAME"]= options.testname;
-    
-    if (not "WORK_SUB_DIR" in os.environ): 
-        os.environ["WORK_SUB_DIR"] = "log"
-    if (not "LOG_SUB_DIR" in os.environ):
-        os.environ["LOG_SUB_DIR"] = "log"
-    setup_work_dir()
-
-    if options.testcase:
-      ret = run_testcase(options.testcase)
-      if ret!=0: ret=1     # work around an issue where a ret of 256 becomes 0 after sys.exit
-      my_exit(ret)
-    if options.remote_deploy or options.remote_run:
-      if options.remote_config_file:
-        parse_config(options.remote_config_file)
-      if options.remote_deploy: 
-        sys_call_debug_begin()
-        ret = do_remote_deploy()
-        sys_call_debug_end()
-        my_exit(ret)
-    sys_call_debug_begin()
-    ret = run_cmd()
-    sys_call_debug_end()
-
-    my_exit(ret)
-    
-if __name__ == "__main__":
-    main(sys.argv[1:])
-
-

http://git-wip-us.apache.org/repos/asf/incubator-helix/blob/4ebc0fad/helix-agent/src/main/scripts/integration-test/script/driver_cmd_dict.py
----------------------------------------------------------------------
diff --git a/helix-agent/src/main/scripts/integration-test/script/driver_cmd_dict.py b/helix-agent/src/main/scripts/integration-test/script/driver_cmd_dict.py
deleted file mode 100644
index a09fcf2..0000000
--- a/helix-agent/src/main/scripts/integration-test/script/driver_cmd_dict.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# this file is included by dds_driver.py for product-specific (e.g., espresso) settings
-#
-possible_ivy_dir=[os.path.join(os.environ["HOME"],".m2/repository"),os.path.join(os.environ["HOME"],".gradle/cache"),os.path.join(os.environ["HOME"],".ivy2/lin-cache/ivy-cache"),os.path.join(os.environ["HOME"],".ivy2/lin-cache"),"/ivy/.ivy2/ivy-cache","/ivy/.ivy2", os.path.join(os.environ["VIEW_ROOT"],"build/ivy2/cache")]
-zookeeper_classpath="IVY_DIR/org/apache/zookeeper/zookeeper/3.3.3/zookeeper-3.3.3.jar:IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-
-# espresso use -2
-kill_cmd_template="jps | grep %s | cut -f1 -d\\  | xargs kill -2"
-kill_container_template="ps -ef | grep tail | grep %s | awk '{print $2}' | xargs kill -9"
-
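-# e.g. kill_cmd_template % "HelixControllerMain" expands to
-#   jps | grep HelixControllerMain | cut -f1 -d\  | xargs kill -2
-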
-# This is not used in Helix
-afterParsingHook=None
-
-# some global variables
-router_http_port=12917
-router_mgmt_port=12920
-storage_node_http_port=12918
-storage_node_mgmt_port=12919
-curl_kill_cmd_template="curl -s http://localhost:%d/pid | xargs kill -2"
-
-# used to run cmd, can combine multiple command
-cmd_dict={
-     "storage-node":{"start":"%s; %s" % (curl_kill_cmd_template % storage_node_mgmt_port,"ant -f espresso-storage-node/run/build.xml run-storage-node"),"stop":curl_kill_cmd_template % storage_node_mgmt_port,"stats":[get_stats,"EspressoSingleNode"]}
-    ,"router":{"start":"ant -f espresso-router/run/build.xml run-router","stop":curl_kill_cmd_template % router_mgmt_port,"stats":[get_stats,"EspressoRouter"]}
-    ,"zookeeper":{"start":[zookeeper_opers,"start"],"stop":[zookeeper_opers,"stop"],"wait_for_exist":[zookeeper_opers,"wait_for_exist"],"wait_for_nonexist":[zookeeper_opers,"wait_for_nonexist"],"wait_for_value":[zookeeper_opers,"wait_for_value"],"cmd":[zookeeper_opers,"cmd"]}
-    ,"cluster-manager":{"start":"ant -f cluster-manager/run/build.xml run-cluster-manager", "stop":kill_cmd_template % "HelixControllerMain"}
-    ,"mock-storage":{"start":"ant -f cluster-manager/run/build.xml run-mock-storage", "stop":kill_cmd_template % "MockStorageProcess"}
-    ,"cluster-state-verifier":{"start":"ant -d -f cluster-manager/run/build.xml run-cluster-state-verifier", "stop":kill_cmd_template % "ClusterStateVerifier"}
-    ,"dummy-process":{"start":"ant -f cluster-manager/run/build.xml run-dummy-process", "stop":kill_cmd_template % "DummyProcess"}
-    ,"mock-health-report-process":{"start":"ant -f cluster-manager/run/build.xml run-mock-health-report-process", "stop":kill_cmd_template % "MockHealthReportParticipant"}
-    ,"clm_console":{"default":"ant -f cluster-manager/run/build.xml run-cm-console","stop":kill_cmd_template % "ClusterSetup"}
-    ,"foo_test":{"start":"integration-test/testcases/foo_test.py"}
-}
-
-cmd_ret_pattern={    # the pattern when the call is considered return successfully
-    "storage-node_start":re.compile("Espresso service started")
-   ,"router_start":re.compile("Espresso service started")
-   ,"cluster-manager_start":re.compile("No Messages to process")
-   ,"mock-storage_start":re.compile("Mock storage started") 
-   ,"dummy-process_start":re.compile("Dummy process started") 
-   ,"mock-health-report-process_start":re.compile("MockHealthReportParticipant process started") 
-   ,"foo_test_start":re.compile("start")
-}
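-
-# Key convention is "<component>_<operation>": a start is considered
-# successful once the component's output matches its pattern, e.g. the
-# cluster-manager once it logs "No Messages to process".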
-
-# the mapping of option to the java options, if not give, then use directly
-direct_java_call_option_mapping={
-   "dump_file":"-f "
-   ,"value_file":"--value.dump.file="
-   #,"log4j_file":"-Dlog4j.configuration=file://"   # this is only for cluster manager
-   #,"log4j_file":"--log_props="
-   ,"config":"--container_props="
-   ,"consumer_event_pattern":"event_pattern"
-   ,"cmdline_props":"--cmdline_props="
-   ,"cmdline_args":" "   # just put the cmdline_args directly
-   ,"relay_host":"--relay_host="
-   ,"relay_port":"--relay_port="
-   #,"jmx_service_port":"--jmx_service_port="
-   ,"bootstrap_host":"--bootstrap_host="
-   ,"bootstrap_port":"--bootstrap_port="
-   ,"http_port":"--http_port="
-   ,"checkpoint_dir":"--checkpoint_dir="
-   ,"dbname":"--dbname="
-   ,"tablename":"--tablename="
-   ,"dburi":"--dburi="
-   ,"dbuser":"--dbuser="
-   ,"dbpasswd":"--dbpassword="
-   ,"schemareg":"--schemareg="
-   ,"schemareg":"--schemareg="
-   ,"db_relay_config":"--db_relay_config="
-}
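-
-# e.g. passing --relay_port=11115 (port illustrative) to the driver turns into
-# "--relay_port=11115" on the java command line; "cmdline_args" maps to a bare
-# space, so its value is appended verbatim.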
-
-# has default value, append to the beginning
-direct_java_call_jvm_args={
-   "jvm_direct_memory_size":["-XX:MaxDirectMemorySize=","100m"]
-   ,"jvm_max_heap_size":["-Xmx","512m"]
-   ,"jvm_min_heap_size":["-Xms","100m"]
-   ,"jvm_gc_log":["-Xloggc:",""]
-   ,"jvm_args":["",""] 
-   ,"log4j_file":["-Dlog4j.configuration=file://",""]   # this is only for cluster manager
-}
-direct_java_call_jvm_args_ordered=[
-   "jvm_direct_memory_size"
-   ,"jvm_max_heap_size"
-   ,"jvm_min_heap_size"
-   ,"jvm_gc_log"
-   ,"jvm_args"
-   ,"log4j_file"
-]
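-
-# With the defaults above and no overriding options, the jvm-arg prefix
-# assembled in this order is:
-#
-#   -XX:MaxDirectMemorySize=100m -Xmx512m -Xms100m
-#
-# jvm_gc_log, jvm_args and log4j_file contribute nothing while their default
-# values stay empty.
-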
-# mapping from option to ant
-ant_call_option_mapping={
-   "dump_file":"dump.file"
-   ,"value_file":"value.dump.file"
-   ,"log4j_file":"log4j.file"
-   ,"config":"config.file"
-   ,"jvm_direct_memory_size":"jvm.direct.memory.size"
-   ,"jvm_max_heap_size":"jvm.max.heap.size"
-   ,"jvm_gc_log":"jvm.gc.log" 
-   ,"jvm_args":"jvm.args" 
-   ,"cmdline_props":"cmdline.props"
-   ,"cmdline_args":"config.cmdline"
-   ,"relay_host":"relay.host"
-   ,"relay_port":"relay.port"
-   #,"jmx_service_port":"jmx.service.port"
-   ,"bootstrap_host":"bootstrap.host"
-   ,"bootstrap_port":"bootstrap.port"
-   ,"consumer_event_pattern":"consumer.event.pattern"
-   ,"http_port":"http.port"
-   ,"checkpoint_dir":"checkpoint.dir"
-#   ,"db_relay_config":"db.relay.config" 
-}
-
-# class path
-import glob
-#print "view_root=" + get_view_root()
-cm_jar_files=glob.glob(os.path.join(get_view_root(),"../../../target/helix-core-*.jar"))
-#cm_jar_file=os.path.basename(cm_jar_file)
-#print cm_jar_file
-cmd_direct_call={
-   "clm_console":
-   {
-    "class_path":[
-      "IVY_DIR/com/github/sgroschupf/zkclient/0.1/zkclient-0.1.jar"
-      ,"IVY_DIR/com/thoughtworks/xstream/xstream/1.3.1/xstream-1.3.1.jar"
-      ,"IVY_DIR/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"
-      ,"IVY_DIR/commons-io/commons-io/1.4/commons-io-1.4.jar"
-      ,"IVY_DIR/commons-lang/commons-lang/2.4/commons-lang-2.4.jar"
-      ,"IVY_DIR/jdom/jdom/1.0/jdom-1.0.jar"
-      ,"IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-      ,"IVY_DIR/org/apache/zookeeper/zookeeper/3.3.3/zookeeper-3.3.3.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-core-asl/1.8.5/jackson-core-asl-1.8.5.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-mapper-asl/1.8.5/jackson-mapper-asl-1.8.5.jar"
-      ,"IVY_DIR/xpp3/xpp3_min/1.1.4c/xpp3_min-1.1.4c.jar"
-      ,"IVY_DIR/org/restlet/org.restlet/1.1.10/org.restlet-1.1.10.jar"
-      ,"IVY_DIR/com/noelios/restlet/com.noelios.restlet/1.1.10/com.noelios.restlet-1.1.10.jar"
-]+cm_jar_files
-  ,"class_name":"org.apache.helix.tools.ClusterSetup"
-  ,"before_cmd":"../../../mvn package -Dmaven.test.skip.exec=true"  # build jar first
-   }
-
-  ,"dummy-process":
-   {
-    "class_path":[
-      "IVY_DIR/com/thoughtworks/xstream/xstream/1.3.1/xstream-1.3.1.jar"
-      ,"IVY_DIR/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"
-      ,"IVY_DIR/commons-io/commons-io/1.4/commons-io-1.4.jar"
-      ,"IVY_DIR/commons-lang/commons-lang/2.4/commons-lang-2.4.jar"
-      ,"IVY_DIR/jdom/jdom/1.0/jdom-1.0.jar"
-      ,"IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-      ,"IVY_DIR/org/apache/zookeeper/zookeeper/3.3.3/zookeeper-3.3.3.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-core-asl/1.8.5/jackson-core-asl-1.8.5.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-mapper-asl/1.8.5/jackson-mapper-asl-1.8.5.jar"
-      ,"IVY_DIR/xpp3/xpp3_min/1.1.4c/xpp3_min-1.1.4c.jar"
-      ,"IVY_DIR/com/github/sgroschupf/zkclient/0.1/zkclient-0.1.jar"
-      ,"IVY_DIR/org/apache/commons/commons-math/2.1/commons-math-2.1.jar"
-]+cm_jar_files
-
-  ,"class_name":"org.apache.helix.mock.participant.DummyProcess"
-  ,"before_cmd":"../../../mvn package -Dmaven.test.skip.exec=true"  # build jar first
-   }
-
-  ,"mock-health-report-process":
-   {
-    "class_path":[
-      "IVY_DIR/com/thoughtworks/xstream/xstream/1.3.1/xstream-1.3.1.jar"
-      ,"IVY_DIR/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"
-      ,"IVY_DIR/commons-io/commons-io/1.4/commons-io-1.4.jar"
-      ,"IVY_DIR/commons-lang/commons-lang/2.4/commons-lang-2.4.jar"
-      ,"IVY_DIR/jdom/jdom/1.0/jdom-1.0.jar"
-      ,"IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-      ,"IVY_DIR/org/apache/zookeeper/zookeeper/3.3.3/zookeeper-3.3.3.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-core-asl/1.8.5/jackson-core-asl-1.8.5.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-mapper-asl/1.8.5/jackson-mapper-asl-1.8.5.jar"
-      ,"IVY_DIR/xpp3/xpp3_min/1.1.4c/xpp3_min-1.1.4c.jar"
-      ,"IVY_DIR/com/github/sgroschupf/zkclient/0.1/zkclient-0.1.jar"
-      ,"IVY_DIR/org/apache/commons/commons-math/2.1/commons-math-2.1.jar"
-      ,"IVY_DIR/org/restlet/org.restlet/1.1.10/org.restlet-1.1.10.jar"
-      ,"IVY_DIR/com/noelios/restlet/com.noelios.restlet/1.1.10/com.noelios.restlet-1.1.10.jar"
-]+cm_jar_files
-
-  ,"class_name":"org.apache.helix.mock.participant.MockHealthReportParticipant"
-  ,"before_cmd":"../../../mvn package -Dmaven.test.skip.exec=true"  # build jar first
-   }
-
-  ,"cluster-manager":
-   {
-    "class_path":[
-      "IVY_DIR/com/thoughtworks/xstream/xstream/1.3.1/xstream-1.3.1.jar"
-      ,"IVY_DIR/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"
-      ,"IVY_DIR/commons-io/commons-io/1.4/commons-io-1.4.jar"
-      ,"IVY_DIR/commons-lang/commons-lang/2.4/commons-lang-2.4.jar"
-      ,"IVY_DIR/jdom/jdom/1.0/jdom-1.0.jar"
-      ,"IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-      ,"IVY_DIR/org/apache/zookeeper/zookeeper/3.3.3/zookeeper-3.3.3.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-core-asl/1.8.5/jackson-core-asl-1.8.5.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-mapper-asl/1.8.5/jackson-mapper-asl-1.8.5.jar"
-      ,"IVY_DIR/xpp3/xpp3_min/1.1.4c/xpp3_min-1.1.4c.jar"
-      ,"IVY_DIR/com/github/sgroschupf/zkclient/0.1/zkclient-0.1.jar"
-      ,"IVY_DIR/org/apache/commons/commons-math/2.1/commons-math-2.1.jar"
-      ,"IVY_DIR/org/restlet/org.restlet/1.1.10/org.restlet-1.1.10.jar"
-      ,"IVY_DIR/com/noelios/restlet/com.noelios.restlet/1.1.10/com.noelios.restlet-1.1.10.jar"
-]+cm_jar_files
-  ,"class_name":"org.apache.helix.controller.HelixControllerMain"
-  ,"before_cmd":"../../../mvn package -Dmaven.test.skip.exec=true"  # build jar first
-   }
-
-  ,"cluster-state-verifier":
-   {
-    "class_path":[
-      "IVY_DIR/com/github/sgroschupf/zkclient/0.1/zkclient-0.1.jar"
-      ,"IVY_DIR/com/thoughtworks/xstream/xstream/1.3.1/xstream-1.3.1.jar"
-      ,"IVY_DIR/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"
-      ,"IVY_DIR/commons-io/commons-io/1.4/commons-io-1.4.jar"
-      ,"IVY_DIR/commons-lang/commons-lang/2.4/commons-lang-2.4.jar"
-      ,"IVY_DIR/jdom/jdom/1.0/jdom-1.0.jar"
-      ,"IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-      ,"IVY_DIR/org/apache/zookeeper/zookeeper/3.3.3/zookeeper-3.3.3.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-core-asl/1.8.5/jackson-core-asl-1.8.5.jar"
-      ,"IVY_DIR/org/codehaus/jackson/jackson-mapper-asl/1.8.5/jackson-mapper-asl-1.8.5.jar"
-      ,"IVY_DIR/xpp3/xpp3_min/1.1.4c/xpp3_min-1.1.4c.jar"
-      ,"IVY_DIR/org/restlet/org.restlet/1.1.10/org.restlet-1.1.10.jar"
-      ,"IVY_DIR/com/noelios/restlet/com.noelios.restlet/1.1.10/com.noelios.restlet-1.1.10.jar"
-]+cm_jar_files
-  ,"class_name":"org.apache.helix.tools.ClusterStateVerifier"
-  ,"before_cmd":"../../../mvn package -Dmaven.test.skip.exec=true"  # build jar first
-   }
-
-  ,"mock-storage":
-   {
-    "class_path":[
-      "IVY_DIR/com/github/sgroschupf/zkclient/0.1/zkclient-0.1.jar"
-     ,"IVY_DIR/log4j/log4j/1.2.15/log4j-1.2.15.jar"
-     ,"IVY_DIR/commons-cli/commons-cli/1.2/commons-cli-1.2.jar"
-     ,"IVY_DIR/commons-math/commons-math/2.1/commons-math-2.1.jar"
-]+cm_jar_files
-  ,"class_name":"org.apache.helix.mock.participant.MockStorageProcess"
-   }
-}
-
-# file the log4j file
-def log4j_file_store_value(option, opt_str, value, parser):
-  setattr(parser.values, option.dest, file_exists(value))
-# configure
-config_group = OptionGroup(parser, "Config options", "")
-config_group.add_option("-p", "--config", action="store", dest="config", default=None,
-                   help="config file path")
-config_group.add_option("--dump_file", action="store", dest="dump_file", default=None,
-                   help="Event dump file")
-config_group.add_option("--value_file", action="store", dest="value_file", default=None,
-                   help="Event value dump file")
-config_group.add_option("-l", "--log4j_file", action="callback", callback=log4j_file_store_value, type="str", dest="log4j_file", default=None,
-                   help="Log4j config file")
-#config_group.add_option("-l", "--log4j_file", action="store", dest="log4j_file", default=None,
-#                   help="Log4j config file")
-config_group.add_option("--relay_host", action="store", dest="relay_host", default=None,
-                   help="Host of relay for a consumer")
-config_group.add_option("--relay_port", action="store", dest="relay_port", default=None,
-                   help="Port of relay for a consumer")
-config_group.add_option("--http_port", action="store", dest="http_port", default=None,
-                   help="Http Port of the current started component")
-config_group.add_option("--db_relay_config", action="store", dest="db_relay_config", default=None,
-                   help="DB relay config file")
-config_group.add_option("--cmdline_props", action="store", dest="cmdline_props", default=None,
-                   help="Command line config props. Comma separate config parameter, e.g., --cmdline_props=databus.relay.eventBuffer.maxSize=1024000;...")
-config_group.add_option("--bootstrap_host", action="store", dest="bootstrap_host", default=None,
-                   help="Host of bootstrap server")
-config_group.add_option("--bootstrap_port", action="store", dest="bootstrap_port", default=None,
-                   help="Port of bootstrap server")
-config_group.add_option("--checkpoint_dir", action="store", dest="checkpoint_dir", default=None,
-                   help="Client checkpoint dir")
-config_group.add_option("--checkpoint_keep", action="store_true", dest="checkpoint_keep", default=False,
-                   help="Do NOT clean client checkpoint dir")
-config_group.add_option("--consumer_event_pattern", action="store", dest="consumer_event_pattern", default=None,
-                   help="Check consumer event pattern if set")
-config_group.add_option("--dbname", action="store", dest="dbname", default=None, help="Espresso db name")
-config_group.add_option("--tablename", action="store", dest="tablename", default=None, help="Espresso table name")
-config_group.add_option("--dburi", action="store", dest="dburi", default=None, help="Espresso db uri")
-config_group.add_option("--dbuser", action="store", dest="dbuser", default=None, help="Espresso db user")
-config_group.add_option("--dbpasswd", action="store", dest="dbpasswd", default=None, help="Espresso db password")
-config_group.add_option("--schemareg", action="store", dest="schemareg", default=None, help="Espresso schemareg ")
-config_group.add_option("-x","--extservice_props", action="append", dest="extservice_props", default=None,
-                       help="Config props to override the extservices. Can give multiple times. One for each property. <entry name>;<prop name>;value. e.g., databus2.relay.local.bizfollow;db.bizfollow.db_url;jdbc.. ")
-config_group.add_option("--cmdline_args", action="store", dest="cmdline_args", default=None, help="Command line arguments")
-
-other_option_group = OptionGroup(parser, "Other options", "")
-other_option_group.add_option("", "--component_id", action="store", dest="component_id", default = None,
-                   help="The compnent id (1,2..) if there are mutliple instance of a component")
-parser.add_option("","--sleep_before_wait", action="store", type="long", dest="sleep_before_wait", default=0,
-                   help="Sleep secs before waiting consumer reaching maxEventWindowScn. [default: %default]")
-parser.add_option("","--sleep_after_wait", action="store", type="long", dest="sleep_after_wait", default=1,
-                   help="Sleep secs after consumer reaching maxEventWindowScn. [default: %default]")
-parser.add_option("","--producer_log_purge_limit", action="store", type="int", dest="producer_log_purge_limit", default=1000,
-                   help="The limit on number of logs to purge for producer [default: %default]")
-