Posted to commits@trafodion.apache.org by li...@apache.org on 2016/12/20 08:54:44 UTC

[3/4] incubator-trafodion git commit: [TRAFODION-2393] python installer - reorganize script directories for better user experience

[TRAFODION-2393] python installer - reorganize script directories for better user experience


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/2886c023
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/2886c023
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/2886c023

Branch: refs/heads/master
Commit: 2886c0232fccb20d45a57c0efdafacf4590c5110
Parents: 2581c0d
Author: Eason <hf...@gmail.com>
Authored: Fri Dec 16 21:40:17 2016 +0800
Committer: Eason <hf...@gmail.com>
Committed: Fri Dec 16 22:34:03 2016 +0800

----------------------------------------------------------------------
 install/python-installer/README.md              |   2 +-
 install/python-installer/apache_mods.py         |  72 ---
 install/python-installer/bashrc.template        |  78 ---
 install/python-installer/common.py              | 478 -----------------
 .../configs/db_config_default.ini               | 125 +++++
 .../python-installer/configs/default_ports.ini  |  25 +
 install/python-installer/configs/mod_cfgs.json  |  60 +++
 install/python-installer/configs/prompt.json    | 236 ++++++++
 install/python-installer/configs/script.json    |  87 +++
 install/python-installer/configs/version.json   |  10 +
 install/python-installer/copy_files.py          |  64 ---
 install/python-installer/db_config_default      | 120 -----
 install/python-installer/db_install.py          | 101 ++--
 install/python-installer/dcs_setup.py           | 114 ----
 install/python-installer/discovery.py           |  12 +-
 install/python-installer/hadoop_mods.py         | 203 -------
 install/python-installer/hdfs_cmds.py           |  68 ---
 install/python-installer/mod_cfgs.json          |  73 ---
 install/python-installer/prompt.json            | 227 --------
 install/python-installer/script.json            |  87 ---
 install/python-installer/scripts/__init__.py    |   0
 install/python-installer/scripts/apache_mods.py |  72 +++
 install/python-installer/scripts/common.py      | 532 +++++++++++++++++++
 install/python-installer/scripts/copy_files.py  |  76 +++
 install/python-installer/scripts/dcs_setup.py   | 117 ++++
 install/python-installer/scripts/hadoop_mods.py | 187 +++++++
 install/python-installer/scripts/hdfs_cmds.py   |  68 +++
 install/python-installer/scripts/traf_check.py  |  87 +++
 install/python-installer/scripts/traf_dep.py    | 106 ++++
 .../python-installer/scripts/traf_discover.py   | 270 ++++++++++
 .../python-installer/scripts/traf_kerberos.py   | 116 ++++
 install/python-installer/scripts/traf_ldap.py   |  73 +++
 .../python-installer/scripts/traf_package.py    |  50 ++
 install/python-installer/scripts/traf_setup.py  | 131 +++++
 .../python-installer/scripts/traf_sqconfig.py   |  74 +++
 install/python-installer/scripts/traf_start.py  |  80 +++
 install/python-installer/scripts/traf_user.py   | 158 ++++++
 install/python-installer/scripts/wrapper.py     | 274 ++++++++++
 .../python-installer/templates/bashrc.template  |  81 +++
 .../templates/traf_authentication_conf.template |  71 +++
 .../traf_authentication_conf.template           |  71 ---
 install/python-installer/traf_check.py          |  87 ---
 install/python-installer/traf_dep.py            | 110 ----
 install/python-installer/traf_discover.py       | 253 ---------
 install/python-installer/traf_kerberos.py       | 116 ----
 install/python-installer/traf_ldap.py           |  73 ---
 install/python-installer/traf_package.py        |  47 --
 install/python-installer/traf_setup.py          | 128 -----
 install/python-installer/traf_sqconfig.py       |  74 ---
 install/python-installer/traf_start.py          |  70 ---
 install/python-installer/traf_user.py           | 146 -----
 install/python-installer/version.json           |  10 -
 install/python-installer/wrapper.py             | 301 -----------
 53 files changed, 3237 insertions(+), 3114 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/README.md
----------------------------------------------------------------------
diff --git a/install/python-installer/README.md b/install/python-installer/README.md
index feadaa2..1844e09 100644
--- a/install/python-installer/README.md
+++ b/install/python-installer/README.md
@@ -15,7 +15,7 @@
 ## How to use:
 - Two ways:
  - Simply invoke `./db_install.py` to start the installation in guided mode
- - Copy the `db_config_default` file to `your_db_config` and modify it, then invoke `./db_config.py --config-file your_db_config` to start installation in config mode
+ - Copy the `configs/db_config_default.ini` file to `your_db_config` and modify it, then invoke `./db_install.py --config-file your_db_config` to start the installation in config mode
 - For a quick install with default settings, you only need to put the Trafodion package file in the installer's directory, provide the CDH/HDP web URL in `your_db_config`, and then it's ready to go!
 - Use `./db_install.py --help` for more options
 - Invoke `./discovery.py` to get the system basic info on all nodes

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/apache_mods.py
----------------------------------------------------------------------
diff --git a/install/python-installer/apache_mods.py b/install/python-installer/apache_mods.py
deleted file mode 100755
index 17a54d5..0000000
--- a/install/python-installer/apache_mods.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-### this script should be run on all nodes with sudo user ###
-
-import sys
-import json
-import socket
-from common import MODCFG_FILE, ParseJson, ParseXML, err, run_cmd
-
-def run():
-    dbcfgs = json.loads(dbcfgs_json)
-    if 'APACHE' in dbcfgs['distro']:
-        modcfgs = ParseJson(MODCFG_FILE).load()
-        MOD_CFGS = modcfgs['MOD_CFGS']
-
-        hdfs_xml_file = dbcfgs['hdfs_xml_file']
-        hbase_xml_file = dbcfgs['hbase_xml_file']
-
-        hbasexml = ParseXML(hbase_xml_file)
-        for key, value in MOD_CFGS['hbase-site'].items():
-            hbasexml.add_property(key, value)
-        hbasexml.write_xml()
-
-        hdfsxml = ParseXML(hdfs_xml_file)
-        for key, value in MOD_CFGS['hdfs-site'].items():
-            hdfsxml.add_property(key, value)
-        hdfsxml.write_xml()
-
-        print 'Apache Hadoop modification completed'
-        first_node = dbcfgs['first_rsnode']
-        local_host = socket.gethostname()
-        if first_node in local_host:
-            hadoop_home = dbcfgs['hadoop_home']
-            hbase_home = dbcfgs['hbase_home']
-            # stop
-            run_cmd(hbase_home + '/bin/stop-hbase.sh')
-            run_cmd(hadoop_home + '/sbin/stop-dfs.sh')
-            # start
-            run_cmd(hadoop_home + '/sbin/start-dfs.sh')
-            run_cmd(hbase_home + '/bin/start-hbase.sh')
-
-            print 'Apache Hadoop restart completed'
-    else:
-        print 'no apache distribution found, skipping'
-
-# main
-try:
-    dbcfgs_json = sys.argv[1]
-except IndexError:
-    err('No db config found')
-run()
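
For reference, each of these worker scripts takes the full db config as a single JSON string in argv[1]. Below is a minimal sketch of driving the (now relocated) script by hand; the key names come from the code above, while every path and hostname is illustrative only:

    import json
    import subprocess

    # illustrative dbcfgs payload; apache_mods.py reads it from sys.argv[1]
    dbcfgs = {
        'distro': 'APACHE',
        'hdfs_xml_file': '/opt/hadoop/etc/hadoop/hdfs-site.xml',  # illustrative path
        'hbase_xml_file': '/opt/hbase/conf/hbase-site.xml',       # illustrative path
        'first_rsnode': 'node01',                                 # illustrative host
        'hadoop_home': '/opt/hadoop',
        'hbase_home': '/opt/hbase',
    }
    subprocess.check_call(['./scripts/apache_mods.py', json.dumps(dbcfgs)])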

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/bashrc.template
----------------------------------------------------------------------
diff --git a/install/python-installer/bashrc.template b/install/python-installer/bashrc.template
deleted file mode 100644
index a85de3c..0000000
--- a/install/python-installer/bashrc.template
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/sh
-
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-
-# This is the .bashrc for the Trafodion environment
-#
-#-------------------------------------------
-# Execute the system's default .bashrc first
-#-------------------------------------------
-if [ -f /etc/bashrc ]; then
-	. /etc/bashrc
-fi
-
-cd $HOME
-
-#-------------------------------------------
-# full path of your Trafodion installation
-#-------------------------------------------
-export TRAF_HOME="{{ traf_home }}"
-
-#-------------------------------------------
-# other env vars needed by Trafodion
-#-------------------------------------------
-
-# These env vars define all nodes in the cluster
-export JAVA_HOME="{{ java_home }}"
-export NODE_LIST="{{ node_list }}"
-export MY_NODES="{{ my_nodes }}"
-export node_count="{{ node_count }}"
-export HADOOP_TYPE="{{ hadoop_type }}"
-export ENABLE_HA="{{ enable_ha }}"
-
-#-------------------------------------------
-# Execute the sqenv.sh script if it exists.
-#-------------------------------------------
-PATH=".:$PATH"
-if [ -f $TRAF_HOME/sqenv.sh ]; then
-	pushd . >/dev/null
-	cd $TRAF_HOME
-	source ./sqenv.sh
-	popd >/dev/null
-	export MANPATH=$MANPATH:$MPI_ROOT/share/man
-fi
-
-#-------------------------------------------
-# additional settings for Trafodion environment
-#-------------------------------------------
-ETC_SECURITY_MSG="***ERROR: To fix this please configure /etc/security/limits.conf properly on $HOSTNAME."
-
-# set core file size
-ulimit -c unlimited
-
-# set max open files
-ulimit -n 32768
-if [ $? -ne 0 ]; then
-    echo "***ERROR: Unable to set max open files. Current value $(ulimit -n)"
-    echo $ETC_SECURITY_MSG
-fi

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/common.py
----------------------------------------------------------------------
diff --git a/install/python-installer/common.py b/install/python-installer/common.py
deleted file mode 100644
index 97b93f2..0000000
--- a/install/python-installer/common.py
+++ /dev/null
@@ -1,478 +0,0 @@
-#!/usr/bin/env python
-
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-### The common functions ###
-
-import os
-import pty
-import sys
-import json
-import re
-import time
-import base64
-import subprocess
-import logging
-try:
-    import xml.etree.cElementTree as ET
-except ImportError:
-    import xml.etree.ElementTree as ET
-from ConfigParser import ConfigParser
-from collections import defaultdict
-
-INSTALLER_LOC = sys.path[0]
-
-USER_PROMPT_FILE = INSTALLER_LOC + '/prompt.json'
-SCRCFG_FILE = INSTALLER_LOC + '/script.json'
-VERSION_FILE = INSTALLER_LOC + '/version.json'
-MODCFG_FILE = INSTALLER_LOC + '/mod_cfgs.json'
-
-DBCFG_FILE = INSTALLER_LOC + '/db_config'
-DBCFG_TMP_FILE = INSTALLER_LOC + '/.db_config_temp'
-
-TMP_DIR = '/tmp/.trafodion_install_temp'
-MARK = '[ERR]'
-
-def version():
-    print 'Installer version: %s' % __VERSION__
-    exit(0)
-
-def ok(msg):
-    print '\n\33[32m***[OK]: %s \33[0m' % msg
-
-def info(msg):
-    print '\n\33[33m***[INFO]: %s \33[0m' % msg
-
-def err_m(msg):
-    """ used by main script """
-    sys.stderr.write('\n\33[31m***[ERROR]: %s \33[0m\n' % msg)
-    sys.exit(1)
-
-def err(msg):
-    """ used by sub script """
-    sys.stderr.write(MARK + msg)
-    sys.exit(1)
-
-def get_logger(log_file):
-
-    log_dir = os.path.dirname(log_file)
-    if not os.path.exists(log_dir): os.mkdir(log_dir)
-
-    logger = logging.getLogger()
-    logger.setLevel(logging.INFO)
-
-    formatter = logging.Formatter('[%(asctime)s %(levelname)s]: %(message)s')
-
-    fh = logging.FileHandler(log_file)
-    fh.setFormatter(formatter)
-
-    logger.addHandler(fh)
-
-    return logger
-
-def run_cmd(cmd):
-    """ check command return value and return stdout """
-    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
-    stdout, stderr = p.communicate()
-    if p.returncode != 0:
-        err('Failed to run command %s: %s' % (cmd, stderr))
-    return stdout.strip()
-
-def run_cmd_as_user(user, cmd):
-    return run_cmd('sudo -n su - %s -c \'%s\'' % (user, cmd))
-
-def cmd_output(cmd):
-    """ return command output but not check return value """
-    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
-    stdout, stderr = p.communicate()
-
-    return stdout.strip() if stdout else stderr
-
-def mod_file(template_file, change_items):
-    """
-        @params: change_items: a dict includes:
-        {regular_expression : replace_string}
-    """
-    try:
-        with open(template_file, 'r') as f:
-            lines = f.read()
-    except IOError:
-        err('Failed to open file %s to modify' % template_file)
-
-    for regexp, replace in change_items.iteritems():
-        lines = re.sub(regexp, replace, lines)
-
-    with open(template_file, 'w') as f:
-        f.write(lines)
-
-def append_file(template_file, string, position=''):
-    try:
-        with open(template_file, 'r') as f:
-            lines = f.readlines()
-        pos = 0
-        if position:
-            for index, line in enumerate(lines):
-                if position in line:
-                    pos = index + 1
-
-        if pos == 0: pos = len(lines)
-        newlines = lines[:pos] + [string + '\n'] + lines[pos:]
-        if not string in lines:
-            with open(template_file, 'w') as f:
-                f.writelines(newlines)
-    except IOError:
-        err('Failed to open file %s to append' % template_file)
-
-
-def write_file(template_file, string):
-    try:
-        with open(template_file, 'w') as f:
-            f.write(string)
-    except IOError:
-        err('Failed to open file %s to write' % template_file)
-
-
-class Version(object):
-    def __init__(self):
-        self.support_ver = ParseJson(VERSION_FILE).load()
-
-    def get_version(self, component):
-        if self.support_ver[component] == '':
-            err('Failed to get version info for "%s" from config file' % component)
-
-        return self.support_ver[component]
-
-class Remote(object):
-    """
-        copy files to/fetch files from remote host using ssh
-        can also use paramiko, but it's not a built-in module
-    """
-
-    def __init__(self, host, user='', pwd=''):
-        self.host = host
-        self.user = user
-        self.rc = 0
-        self.pwd = pwd
-        self.sshpass = self._sshpass_available()
-
-    @staticmethod
-    def _sshpass_available():
-        sshpass_available = True
-        try:
-            p = subprocess.Popen(['sshpass'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            p.communicate()
-        except OSError:
-            sshpass_available = False
-
-        return sshpass_available
-
-    def _commands(self, method):
-        cmd = []
-        if self.sshpass and self.pwd: cmd = ['sshpass', '-p', self.pwd]
-        cmd += [method]
-        if not (self.sshpass and self.pwd): cmd += ['-oPasswordAuthentication=no']
-        return cmd
-
-    def _execute(self, cmd, verbose=False, shell=False):
-        try:
-            if verbose: print 'cmd:', cmd
-
-            master, slave = pty.openpty()
-            if shell:
-                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
-            else:
-                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
-            self.stdout, self.stderr = p.communicate()
-            if p.returncode:
-                self.rc = p.returncode
-                # 'ssh -tt' will overwrite stderr, so manually handle it
-                if MARK in self.stdout:
-                    self.stdout, self.stderr = self.stdout.split(MARK)
-                else:
-                    self.stderr = self.stdout
-        except Exception as e:
-            err_m('Failed to run commands on remote host: %s' % e)
-
-    def execute(self, user_cmd):
-        cmd = self._commands('ssh')
-        if self.user:
-            cmd += ['%s@%s' % (self.user, self.host)]
-        else:
-            cmd += [self.host]
-
-        cmd += user_cmd.split()
-        self._execute(cmd)
-
-    def copy(self, files, remote_folder='.'):
-        """ copy file to user's home folder """
-        for f in files:
-            if not os.path.exists(f):
-                err_m('Copy file error: %s doesn\'t exist' % f)
-
-        cmd = self._commands('scp')
-        cmd += ['-r']
-        cmd += files # files should be full path
-        if self.user:
-            cmd += ['%s@%s:%s/' % (self.user, self.host, remote_folder)]
-        else:
-            cmd += ['%s:%s/' % (self.host, remote_folder)]
-
-        self._execute(cmd)
-        if self.rc != 0: err('Failed to copy files to remote nodes')
-
-    def fetch(self, files, local_folder='.'):
-        """ fetch file from user's home folder """
-        cmd = self._commands('scp')
-        cmd += ['-r']
-        if self.user:
-            cmd += ['%s@%s:~/{%s}' % (self.user, self.host, ','.join(files))]
-        else:
-            cmd += ['%s:~/{%s}' % (self.host, ','.join(files))]
-        cmd += [local_folder]
-
-        self._execute(cmd)
-        if self.rc != 0: err('Failed to fetch files from remote nodes')
-
-
-class ParseHttp(object):
-    def __init__(self, user, passwd, json_type=True):
-        # httplib2 is not installed by default
-        try:
-            import httplib2
-        except ImportError:
-            err_m('Python module httplib2 is not found. Install python-httplib2 first.')
-
-        self.user = user
-        self.passwd = passwd
-        self.h = httplib2.Http(disable_ssl_certificate_validation=True)
-        self.h.add_credentials(self.user, self.passwd)
-        self.headers = {}
-        self.headers['X-Requested-By'] = 'trafodion'
-        if json_type:
-            self.headers['Content-Type'] = 'application/json'
-        self.headers['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (self.user, self.passwd)))
-
-    def _request(self, url, method, body=None):
-        try:
-            resp, content = self.h.request(url, method, headers=self.headers, body=body)
-            # return code is not 2xx
-            if not 200 <= resp.status < 300:
-                err_m('Error return code {0} when {1}ting configs'.format(resp.status, method.lower()))
-            return content
-        except Exception as exc:
-            err_m('Error with {0}ting configs using URL {1}. Reason: {2}'.format(method.lower(), url, exc))
-
-    def get(self, url):
-        try:
-            return defaultdict(str, json.loads(self._request(url, 'GET')))
-        except ValueError:
-            err_m('Failed to get data from URL, check password if URL requires authentication')
-
-    def put(self, url, config):
-        if not isinstance(config, dict): err_m('Wrong HTTP PUT parameter, should be a dict')
-        result = self._request(url, 'PUT', body=json.dumps(config))
-        if result: return defaultdict(str, json.loads(result))
-
-    def post(self, url):
-        try:
-            return defaultdict(str, json.loads(self._request(url, 'POST')))
-        except ValueError:
-            err_m('Failed to send command to URL')
-
-
-class ParseXML(object):
-    """ handle *-site.xml with format
-        <property><name></name><value></value></proerty>
-    """
-    def __init__(self, xml_file):
-        self.__xml_file = xml_file
-        if not os.path.exists(self.__xml_file): err_m('Cannot find xml file %s' % self.__xml_file)
-        try:
-            self._tree = ET.parse(self.__xml_file)
-        except Exception as e:
-            err_m('failed to parse xml: %s' % e)
-
-        self._root = self._tree.getroot()
-        self._properties = self._root.findall('property')
-        # name, value list
-        self._nvlist = [[elem.text for elem in p] for p in self._properties]
-
-    def __indent(self, elem):
-        """Return a pretty-printed XML string for the Element."""
-        if len(elem):
-            if not elem.text: elem.text = '\n' + '  '
-            if not elem.tail: elem.tail = '\n'
-            for subelem in elem:
-                self.__indent(subelem)
-        else:
-            if not elem.tail: elem.tail = '\n' + '  '
-
-    def get_property(self, name):
-        try:
-            return [x[1] for x in self._nvlist if x[0] == name][0]
-        except:
-            return ''
-
-    def rm_property(self, name):
-        for p in self._properties:
-            if p[0].text == name:
-                self._root.remove(p)
-
-    def add_property(self, name, value):
-        # don't add property if already exists
-        if self.get_property(name): return
-
-        elem_p = ET.Element('property')
-        elem_name = ET.Element('name')
-        elem_value = ET.Element('value')
-
-        elem_name.text = name
-        elem_value.text = value
-        elem_p.append(elem_name)
-        elem_p.append(elem_value)
-
-        self._nvlist.append([name, value])
-        self._root.append(elem_p)
-
-    def write_xml(self):
-        self.__indent(self._root)
-        self._tree.write(self.__xml_file)
-
-    def print_xml(self):
-        for name, value in self._nvlist:
-            print name, value
-
-class ParseJson(object):
-    def __init__(self, js_file):
-        self.__js_file = js_file
-
-    def load(self):
-        """ load json file to a dict """
-        if not os.path.exists(self.__js_file): err_m('Cannot find json file %s' % self.__js_file)
-        with open(self.__js_file, 'r') as f:
-            tmparray = f.readlines()
-        content = ''
-        for t in tmparray:
-            content += t
-
-        try:
-            return defaultdict(str, json.loads(content))
-        except ValueError:
-            err_m('No json format found in config file %s' % self.__js_file)
-
-    def save(self, dic):
-        """ save dict to json file with pretty format """
-        with open(self.__js_file, 'w') as f:
-            f.write(json.dumps(dic, indent=4))
-        return 0
-
-
-class ParseInI(object):
-    def __init__(self, ini_file):
-        self.__ini_file = ini_file
-        self.section = 'def'
-
-    def load(self):
-        """ load content from ini file and return a dict """
-        if not os.path.exists(self.__ini_file):
-            err_m('Cannot find ini file %s' % self.__ini_file)
-
-        cfgs = {}
-        cf = ConfigParser()
-        cf.read(self.__ini_file)
-
-        if not cf.has_section(self.section):
-            err_m('Cannot find the default section [%s]' % self.section)
-
-        for cfg in cf.items(self.section):
-            cfgs[cfg[0]] = cfg[1]
-
-        return defaultdict(str, cfgs)
-
-    def save(self, dic):
-        """ save a dict as an ini file """
-        cf = ConfigParser()
-        cf.add_section(self.section)
-        for key, value in dic.iteritems():
-            cf.set(self.section, key, value)
-
-        with open(self.__ini_file, 'w') as f:
-            cf.write(f)
-
-def http_start(repo_dir, repo_port):
-    info('Starting temporary python http server')
-    os.system("cd %s; python -m SimpleHTTPServer %s > /dev/null 2>&1 &" % (repo_dir, repo_port))
-
-def http_stop():
-    #info('Stopping temporary python http server')
-    os.system("ps -ef|grep SimpleHTTPServer |grep -v grep | awk '{print $2}' |xargs kill -9 >/dev/null 2>&1")
-
-def format_output(text):
-    num = len(text) + 4
-    print '*' * num
-    print '  ' + text
-    print '*' * num
-
-def expNumRe(text):
-    """
-    expand numeric regular expression to list
-    e.g. 'n[01-03],n1[0-1]': ['n01','n02','n03','n10','n11']
-    e.g. 'n[09-11].com': ['n09.com','n10.com','n11.com']
-    """
-    explist = []
-    for regex in text.split(','):
-        regex = regex.strip()
-        r = re.match(r'(.*)\[(\d+)-(\d+)\](.*)', regex)
-        if r:
-            h = r.group(1)
-            d1 = r.group(2)
-            d2 = r.group(3)
-            t = r.group(4)
-
-            convert = lambda d: str(('%0' + str(min(len(d1), len(d2))) + 'd') % d)
-            if d1 > d2: d1, d2 = d2, d1
-            explist.extend([h + convert(c) + t for c in range(int(d1), int(d2)+1)])
-
-        else:
-            # keep original value if not matched
-            explist.append(regex)
-
-    return explist
-
-def time_elapse(func):
-    """ time elapse decorator """
-    def wrapper(*args, **kwargs):
-        start_time = time.time()
-        output = func(*args, **kwargs)
-        end_time = time.time()
-        seconds = end_time - start_time
-        hours = seconds / 3600
-        seconds = seconds % 3600
-        minutes = seconds / 60
-        seconds = seconds % 60
-        print '\nTime Cost: %d hour(s) %d minute(s) %d second(s)' % (hours, minutes, seconds)
-        return output
-    return wrapper
-
-if __name__ == '__main__':
-    exit(0)
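
The numeric range expansion done by expNumRe() above is easiest to see by example. A quick sketch, importing from the module's new location (scripts/common.py) introduced by this commit; the expected outputs are taken from the docstring:

    from scripts.common import expNumRe

    print expNumRe('n[01-03],n1[0-1]')  # ['n01', 'n02', 'n03', 'n10', 'n11']
    print expNumRe('n[09-11].com')      # ['n09.com', 'n10.com', 'n11.com']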

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/configs/db_config_default.ini
----------------------------------------------------------------------
diff --git a/install/python-installer/configs/db_config_default.ini b/install/python-installer/configs/db_config_default.ini
new file mode 100644
index 0000000..52f1f47
--- /dev/null
+++ b/install/python-installer/configs/db_config_default.ini
@@ -0,0 +1,125 @@
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+[dbconfigs]
+# NOTICE: if you are using a CDH/HDP hadoop distro,
+# you need only specify the management url address for a quick install
+
+##################################
+# Common Settings
+##################################
+
+# trafodion username and password
+traf_user = trafodion
+traf_pwd = traf123
+# trafodion user's home directory
+home_dir = /home
+# the directory location of trafodion binary
+# if not provided, the default value will be {package_name}-{version}
+traf_dirname =
+
+# java(JDK) path used by trafodion on the trafodion nodes
+# if not provided, installer will auto detect installed JDK
+java_home =
+
+# cloudera/ambari management url(i.e. http://192.168.0.1:7180 or just 192.168.0.1)
+# if 'http' or 'https' prefix is not provided, the default one is 'http'
+# if port is not provided, the default port is cloudera port '7180'
+mgr_url =
+# user name for cloudera/ambari management url
+mgr_user = admin
+# password for cloudera/ambari management url
+mgr_pwd = admin
+# set the cluster number if multiple clusters are managed by one Cloudera manager
+# ignore it if only one cluster is being managed
+cluster_no = 1
+
+# trafodion tar package file location
+# no need to provide it if the package can be found in the installer's directory
+traf_package =
+
+# the number of dcs servers on each node
+dcs_cnt_per_node = 4
+
+# scratch file locations, separated by commas if more than one
+scratch_locs = $TRAF_HOME/tmp
+
+# start the trafodion instance after installation completes
+traf_start = Y
+
+
+##################################
+# DCS HA configuration
+##################################
+
+# set it to 'Y' to enable DCS HA
+dcs_ha = N
+# if HA is enabled, provide the floating ip, network interface and the hostnames of backup dcs master nodes
+dcs_floating_ip =
+# network interface that dcs uses
+dcs_interface =
+# backup dcs master nodes, separated by commas if more than one
+dcs_backup_nodes =
+
+
+##################################
+# Offline installation setting
+##################################
+
+# set offline mode to Y if no internet connection
+offline_mode = N
+# if offline mode is set, you must provide a local repository directory with all needed RPMs
+local_repo_dir =
+
+
+##################################
+# LDAP security configuration
+##################################
+
+# set it to 'Y' to enable LDAP security
+ldap_security = N
+# LDAP user name and password to be assigned DB admin privileges
+db_admin_user = admin
+db_admin_pwd = traf123
+# LDAP user to be assigned DB root privileges (DB__ROOT)
+db_root_user = trafodion
+# if LDAP security is enabled, provide the following items
+ldap_hosts =
+# 389 for no encryption or TLS, 636 for SSL
+ldap_port = 389
+ldap_identifiers =
+ldap_encrypt = 0
+ldap_certpath =
+# provide these if they exist
+ladp_user =
+ladp_pwd =
+
+##################################
+# Kerberos security configuration
+##################################
+# if kerberos is enabled in your hadoop system, provide the info below
+
+# KDC server address
+kdc_server =
+# include realm, i.e. admin/admin@EXAMPLE.COM
+admin_principal =
+# admin password for the admin principal; it is used to create the trafodion user's principal and keytab
+kdcadmin_pwd =
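
A sketch of loading this file programmatically. It assumes the relocated scripts/common.py keeps a two-argument ParseInI constructor (file, section), matching the call sites in the db_install.py diff further below; the section name 'dbconfigs' is the header above:

    from scripts.common import ParseInI

    cfgs = ParseInI('configs/db_config_default.ini', 'dbconfigs').load()
    print cfgs['traf_user']   # 'trafodion'
    print cfgs['mgr_url']     # empty string until the user fills it in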

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/configs/default_ports.ini
----------------------------------------------------------------------
diff --git a/install/python-installer/configs/default_ports.ini b/install/python-installer/configs/default_ports.ini
new file mode 100644
index 0000000..9b2898e
--- /dev/null
+++ b/install/python-installer/configs/default_ports.ini
@@ -0,0 +1,25 @@
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+[ports]
+dcs_master_port = 23400
+dcs_info_port = 24400
+repo_http_port = 9900

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/configs/mod_cfgs.json
----------------------------------------------------------------------
diff --git a/install/python-installer/configs/mod_cfgs.json b/install/python-installer/configs/mod_cfgs.json
new file mode 100644
index 0000000..ca9ac96
--- /dev/null
+++ b/install/python-installer/configs/mod_cfgs.json
@@ -0,0 +1,60 @@
+{
+"MOD_CFGS": {
+    "hbase-site": {
+        "hbase.master.distributed.log.splitting": "true",
+        "hbase.snapshot.master.timeoutMillis": "600000",
+        "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation",
+        "hbase.hregion.impl": "org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion",
+        "hbase.regionserver.region.split.policy": "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy",
+        "hbase.snapshot.enabled": "true",
+        "hbase.bulkload.staging.dir": "/hbase-staging",
+        "hbase.regionserver.region.transactional.tlog": "true",
+        "hbase.snapshot.region.timeout": "600000",
+        "hbase.client.scanner.timeout.period": "600000"
+    },
+    "hdfs-site": { "dfs.namenode.acls.enabled": "true" },
+    "zoo.cfg": { "maxClientCnxns": "0" }
+},
+
+"HBASE_MASTER_CONFIG": {
+"roleTypeConfigs" :  [ {
+        "roleType" : "MASTER",
+        "items" : [ {
+                "name" : "hbase_master_config_safety_valve",
+        "value" : "<property>\r\n   <name>hbase.master.distributed.log.splitting</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.master.timeoutMillis</name>\r\n   <value>600000</value>\r\n</property>\r\n"
+                } ]
+    } ]
+},
+
+"HBASE_RS_CONFIG": {
+"items" : [ {
+                "name" : "hbase_coprocessor_region_classes",
+                "value" : "org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation"
+                }, {
+                "name" : "hbase_regionserver_lease_period",
+                "value" : "600000"
+                }, {
+                "name" : "hbase_regionserver_config_safety_valve",
+                "value" : "<property>\r\n   <name>hbase.hregion.impl</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.split.policy</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy</value>\r\n</property>\r\n  <property>\r\n   <name>hbase.snapshot.enabled</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.bulkload.staging.dir</name>\r\n   <value>/hbase-staging</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.transactional.tlog</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.region.timeout</name>\r\n   <value>600000</value>\r\n</property>\r\n "
+                } ]
+},
+
+"HDFS_CONFIG": {
+    "items": [ {
+             "name":"dfs_namenode_acls_enabled",
+             "value":"true"
+             } ]
+},
+
+"ZK_CONFIG":{
+"roleTypeConfigs" :
+    [ {
+        "roleType" : "SERVER",
+        "items":
+           [ {
+           "name"  : "maxClientCnxns",
+           "value" : "0"
+           } ]
+    } ]
+}
+}
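
The MOD_CFGS section above is what apache_mods.py (diffed earlier) applies to the *-site.xml files. A minimal sketch of that flow, assuming ParseJson and ParseXML remain available from the relocated scripts/common.py; the xml path is illustrative:

    from scripts.common import ParseJson, ParseXML

    modcfgs = ParseJson('configs/mod_cfgs.json').load()
    hbasexml = ParseXML('/etc/hbase/conf/hbase-site.xml')  # illustrative path
    for name, value in modcfgs['MOD_CFGS']['hbase-site'].items():
        hbasexml.add_property(name, value)  # skips properties that already exist
    hbasexml.write_xml()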

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/configs/prompt.json
----------------------------------------------------------------------
diff --git a/install/python-installer/configs/prompt.json b/install/python-installer/configs/prompt.json
new file mode 100644
index 0000000..a0782db
--- /dev/null
+++ b/install/python-installer/configs/prompt.json
@@ -0,0 +1,236 @@
+{
+  "traf_dirname":
+  {
+      "prompt":"Enter directory name to install trafodion to"
+  },
+  "home_dir":
+  {
+      "prompt":"Enter trafodion user's home directory",
+      "default":"/home"
+  },
+  "traf_start":
+  {
+      "prompt":"Start instance after installation",
+      "default":"Y",
+      "isYN":true
+  },
+  "dcs_ha":
+  {
+      "prompt":"Enable DCS High Avalability",
+      "default":"N",
+      "isYN":true
+  },
+  "dcs_floating_ip":
+  {
+      "prompt":"Enter Floating IP address for DCS HA",
+      "isIP":true
+  },
+  "dcs_interface":
+  {
+      "prompt":"Enter interface for Floating IP address",
+      "default":"eth0"
+  },
+  "dcs_backup_nodes":
+  {
+      "prompt":"Enter DCS Backup Master Nodes for DCS HA (comma separated)"
+  },
+  "kdc_server":
+  {
+      "prompt":"Enter KDC server address"
+  },
+  "admin_principal":
+  {
+      "prompt":"Enter admin principal (include realm)"
+  },
+  "kdcadmin_pwd":
+  {
+      "prompt":"Enter password for admin principal",
+      "ispasswd":true
+  },
+  "hdfs_keytab":
+  {
+      "prompt":"Enter fully qualified name for HDFS keytab"
+  },
+  "hbase_keytab":
+  {
+      "prompt":"Enter fully qualified name for HBase keytab"
+  },
+  "max_lifetime":
+  {
+      "prompt":"Enter max lifetime for Trafodion principal (valid format required)",
+      "default":"24hours"
+  },
+  "max_renew_lifetime":
+  {
+      "prompt":"Enter renew lifetime for Trafodion principal (valid format required)",
+      "default":"7days"
+  },
+  "traf_keytab":
+  {
+      "prompt":"Enter Trafodion keytab name"
+  },
+  "ldap_security":
+  {
+      "prompt":"Enable LDAP security",
+      "default":"N",
+      "isYN":true
+  },
+  "ldap_hosts":
+  {
+      "prompt":"Enter list of LDAP Hostnames (comma separated)"
+  },
+  "ldap_port":
+  {
+      "prompt":"Enter LDAP Port number (Example: 389 for no encryption or TLS, 636 for SSL)",
+      "default":"389",
+      "isdigit":true
+  },
+  "ldap_identifiers":
+  {
+      "prompt":"Enter all LDAP unique identifiers (blank separated)"
+  },
+  "ldap_encrypt":
+  {
+      "prompt":"Enter LDAP Encryption Level (0: Encryption not used, 1: SSL, 2: TLS)",
+      "default":"0",
+      "isdigit":true
+  },
+  "ldap_certpath":
+  {
+      "prompt":"Enter full path to TLS certificate file (*.pem)",
+      "isexist":true
+  },
+  "ldap_userinfo":
+  {
+      "prompt":"If Requred search user name/password",
+      "default":"N",
+      "isYN":true
+  },
+  "ldap_user":
+  {
+      "prompt":"Enter Search user name (if required)",
+      "default":" "
+  },
+  "ldap_pwd":
+  {
+      "prompt":"Enter Search password (if required)",
+      "default":" "
+  },
+  "scratch_locs":
+  {
+      "prompt":"Enter trafodion scratch file folder location(should be a large disk),\nif more than one folder, use comma seperated",
+      "default":"$TRAF_HOME/tmp"
+  },
+  "local_repo_dir":
+  {
+      "prompt":"Enter local repository folder location to install trafodion RPM dependencies",
+      "isexist":true
+  },
+  "java_home":
+  {
+      "prompt":"Specify location of Java(JDK) on trafodion nodes",
+      "isremote_exist":true,
+      "default":"/usr/lib/jvm/java-1.7.0-openjdk.x86_64"
+  },
+  "dcs_cnt_per_node":
+  {
+      "prompt":"Enter number of DCS client connections per node",
+      "default":"4",
+      "isdigit":true
+  },
+  "first_rsnode":
+  {
+      "prompt":"Enter the hostname of first Apache HBase RegionServer node"
+  },
+  "hdfs_user":
+  {
+      "prompt":"Enter hadoop user name",
+      "default":"hdfs",
+      "isuser":true
+  },
+  "hbase_user":
+  {
+      "prompt":"Enter hbase user name",
+      "default":"hbase",
+      "isuser":true
+  },
+  "hadoop_home":
+  {
+      "prompt":"Enter Apache Hadoop directory location",
+      "isremote_exist":true
+  },
+  "hbase_home":
+  {
+      "prompt":"Enter Apache HBase directory location",
+      "isremote_exist":true
+  },
+  "hive_home":
+  {
+      "prompt":"Enter Apache Hive directory location if exists",
+      "default":"NO_HIVE"
+  },
+  "mgr_url":
+  {
+      "prompt":"Enter HDP/CDH web manager URL:port, (full URL, if no http/https prefix, default prefix is http://)"
+  },
+  "mgr_user":
+  {
+      "prompt":"Enter HDP/CDH web manager user name",
+      "default":"admin",
+      "isuser":true
+  },
+  "mgr_pwd":
+  {
+      "prompt":"Enter HDP/CDH web manager user password",
+      "ispasswd":true
+  },
+  "traf_user":
+  {
+      "prompt":"Enter trafodion user name",
+      "default":"trafodion",
+      "isuser":true
+  },
+  "traf_pwd":
+  {
+      "prompt":"Enter trafodion user password",
+      "ispasswd":true
+  },
+  "traf_package":
+  {
+      "prompt":"Enter full path to Trafodion tar file",
+      "isexist":true
+  },
+  "db_root_user":
+  {
+      "prompt":"Enter LDAP user name to be assigned DB root privileges (DB__ROOT)",
+      "default":"trafodion",
+      "isuser":true
+  },
+  "db_admin_user":
+  {
+      "prompt":"Enter LDAP user name to be assigned DB Admin privileges",
+      "default":"admin",
+      "isuser":true
+  },
+  "db_admin_pwd":
+  {
+      "prompt":"Enter LDAP user password to be assigned DB Admin privileges",
+      "default":"traf123"
+  },
+  "node_list":
+  {
+      "prompt":"Enter list of Nodes separated by comma, support simple numeric RE,\n i.e. \"n[01-12],n[21-25]\",\"n0[1-5].com\""
+  },
+  "cluster_no":
+  {
+      "prompt":"Select the above cluster number for installing Trafodion",
+      "default":"1",
+      "isdigit":true
+  },
+  "use_data_node":
+  {
+      "prompt":"Install Trafodion nodes on all DataNodes",
+      "default":"Y",
+      "isYN":true
+  }
+}
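
Each entry above pairs a prompt string with optional validation flags (isYN, isdigit, isexist, ispasswd, ...) that the UserInput class in db_install.py checks. A self-contained, simplified sketch of the flag semantics; the real checks appear in the db_install.py diff below:

    import os

    def check(answer, flags):
        # simplified versions of the UserInput validations
        if 'isYN' in flags and answer.upper() not in ('Y', 'N'):
            return False
        if 'isdigit' in flags and not answer.isdigit():
            return False
        if 'isexist' in flags and not os.path.exists(answer):
            return False
        return True

    print check('389', ['isdigit'])  # True
    print check('maybe', ['isYN'])   # False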

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/configs/script.json
----------------------------------------------------------------------
diff --git a/install/python-installer/configs/script.json b/install/python-installer/configs/script.json
new file mode 100644
index 0000000..f6191d5
--- /dev/null
+++ b/install/python-installer/configs/script.json
@@ -0,0 +1,87 @@
+{
+"install": [
+    {
+        "script": "traf_check.py",
+        "desc": "Environment Check",
+        "node": "all"
+    },
+    {
+        "script": "copy_files.py",
+        "desc": "Copy Trafodion package file",
+        "node": "local",
+        "req_pwd": "yes"
+    },
+    {
+        "script": "traf_user.py",
+        "desc": "Trafodion user Setup",
+        "node": "all"
+    },
+    {
+        "script": "traf_dep.py",
+        "desc": "Install Trafodion dependencies",
+        "node": "all"
+    },
+    {
+        "script": "traf_package.py",
+        "desc": "Install Trafodion package",
+        "node": "all",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "traf_setup.py",
+        "desc": "Environment Setup",
+        "node": "all"
+    },
+    {
+        "script": "traf_kerberos.py",
+        "desc": "Kerberos Setup",
+        "node": "all"
+    },
+    {
+        "script": "dcs_setup.py",
+        "desc": "DCS/REST Setup",
+        "node": "all",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "traf_ldap.py",
+        "desc": "LDAP Security Setup",
+        "node": "all",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "hadoop_mods.py",
+        "desc": "Hadoop modification and restart",
+        "node": "local"
+    },
+    {
+        "script": "apache_mods.py",
+        "desc": "Hadoop modification and restart",
+        "node": "all"
+    },
+    {
+        "script": "hdfs_cmds.py",
+        "desc": "Set permission of HDFS folder for Trafodion user",
+        "node": "first_rs"
+    },
+    {
+        "script": "traf_sqconfig.py",
+        "desc": "Sqconfig Setup",
+        "node": "first",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "traf_start.py",
+        "desc": "Start Trafodion",
+        "node": "first",
+        "run_as_traf": "yes"
+    }
+],
+"discover": [
+    {
+        "script": "traf_discover.py",
+        "desc": "Environment Discover",
+        "node": "all"
+    }
+]
+}
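
These entries define the ordered install and discover pipelines: each step names its script, a description, where it runs ('all', 'local', 'first', 'first_rs'), and optional flags such as run_as_traf. The wrapper (scripts/wrapper.py, not diffed in this part) consumes this file; a sketch that simply lists the pipeline:

    from scripts.common import ParseJson

    steps = ParseJson('configs/script.json').load()
    for step in steps['install']:
        print '%-18s node=%-8s %s' % (step['script'], step['node'], step['desc'])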

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/configs/version.json
----------------------------------------------------------------------
diff --git a/install/python-installer/configs/version.json b/install/python-installer/configs/version.json
new file mode 100644
index 0000000..b0064d5
--- /dev/null
+++ b/install/python-installer/configs/version.json
@@ -0,0 +1,10 @@
+{
+    "linux":  ["centos", "redhat"],
+    "hadoop": ["cloudera", "hortonworks", "apache"],
+    "java":   ["1.7", "1.8"],
+    "centos": ["6"],
+    "redhat": ["6"],
+    "cdh":    ["5.4", "5.5", "5.6"],
+    "hdp":    ["2.3", "2.4"],
+    "hbase":  ["1.0", "1.1"]
+}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/copy_files.py
----------------------------------------------------------------------
diff --git a/install/python-installer/copy_files.py b/install/python-installer/copy_files.py
deleted file mode 100755
index 23b5dd9..0000000
--- a/install/python-installer/copy_files.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-### this script should be run on local node ###
-
-import sys
-import json
-from threading import Thread
-from common import Remote, run_cmd, err
-
-def run(pwd):
-    """ gen ssh key on local and copy to all nodes
-        copy traf package file from local to all nodes
-    """
-    dbcfgs = json.loads(dbcfgs_json)
-    hosts = dbcfgs['node_list'].split(',')
-    traf_package = dbcfgs['traf_package']
-
-    key_file = '/tmp/id_rsa'
-    run_cmd('sudo -n rm -rf %s*' % key_file)
-    run_cmd('sudo -n echo -e "y" | ssh-keygen -t rsa -N "" -f %s' % key_file)
-
-    files = [key_file, key_file+'.pub', traf_package]
-
-    remote_insts = [Remote(h, pwd=pwd) for h in hosts]
-    threads = [Thread(target=r.copy, args=(files, '/tmp')) for r in remote_insts]
-    for thread in threads: thread.start()
-    for thread in threads: thread.join()
-    for r in remote_insts:
-        if r.rc != 0: err('Failed to copy files to %s' % r.host)
-
-
-# main
-try:
-    dbcfgs_json = sys.argv[1]
-except IndexError:
-    err('No db config found')
-
-try:
-    pwd = sys.argv[2]
-except IndexError:
-    pwd = ''
-
-run(pwd)
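
Like the other worker scripts, this one took the config JSON as argv[1] plus an optional ssh password as argv[2], a convention the relocated scripts/copy_files.py presumably keeps. An illustrative invocation; the hosts, package path, and password are placeholders:

    import json
    import subprocess

    dbcfgs = {
        'node_list': 'node01,node02',                          # illustrative hosts
        'traf_package': '/tmp/apache-trafodion-2.1.0.tar.gz',  # illustrative path
    }
    # the second argument is the ssh password; omit it when keys are already set up
    subprocess.check_call(['./scripts/copy_files.py', json.dumps(dbcfgs), 'my-ssh-password'])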

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/db_config_default
----------------------------------------------------------------------
diff --git a/install/python-installer/db_config_default b/install/python-installer/db_config_default
deleted file mode 100644
index 3087b5b..0000000
--- a/install/python-installer/db_config_default
+++ /dev/null
@@ -1,120 +0,0 @@
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-[def]
-# NOTICE: if you are using a CDH/HDP hadoop distro,
-# you need only specify the management url address for a quick install
-
-##################################
-# Common Settings
-##################################
-
-# trafodion username and password
-traf_user = trafodion
-traf_pwd = traf123
-
-# java(JDK) path used by trafodion on the trafodion nodes
-# if not provided, installer will auto detect installed JDK
-java_home =
-
-# cloudera/ambari management url(i.e. http://192.168.0.1:7180 or just 192.168.0.1)
-# if 'http' or 'https' prefix is not provided, the default one is 'http'
-# if port is not provided, the default port is cloudera port '7180'
-mgr_url =
-# user name for cloudera/ambari management url
-mgr_user = admin
-# password for cloudera/ambari management url
-mgr_pwd = admin
-# set the cluster number if multiple clusters are managed by one Cloudera manager
-# ignore it if only one cluster is being managed
-cluster_no = 1
-
-# trafodion tar package file location
-# no need to provide it if the package can be found in the installer's directory
-traf_package =
-
-# the number of dcs servers on each node
-dcs_cnt_per_node = 4
-
-# scratch file locations, separated by commas if more than one
-scratch_locs = $TRAF_HOME/tmp
-
-# start the trafodion instance after installation completes
-traf_start = Y
-
-
-##################################
-# DCS HA configuration
-##################################
-
-# set it to 'Y' to enable DCS HA
-dcs_ha = N
-# if HA is enabled, provide the floating ip, network interface and the hostnames of backup dcs master nodes
-dcs_floating_ip =
-# network interface that dcs uses
-dcs_interface =
-# backup dcs master nodes, separated by commas if more than one
-dcs_backup_nodes =
-
-
-##################################
-# Offline installation setting
-##################################
-
-# set offline mode to Y if no internet connection
-offline_mode = N
-# if offline mode is set, you must provide a local repository directory with all needed RPMs
-local_repo_dir =
-
-
-##################################
-# LDAP security configuration
-##################################
-
-# set it to 'Y' to enable LDAP security
-ldap_security = N
-# LDAP user name and password to be assigned DB admin privileges
-db_admin_user = admin
-db_admin_pwd = traf123
-# LDAP user to be assigned DB root privileges (DB__ROOT)
-db_root_user = trafodion
-# if LDAP security is enabled, provide the following items
-ldap_hosts =
-# 389 for no encryption or TLS, 636 for SSL
-ldap_port = 389
-ldap_identifiers =
-ldap_encrypt = 0
-ldap_certpath =
-# provide these if they exist
-ladp_user =
-ladp_pwd =
-
-##################################
-# Kerberos security configuration
-##################################
-# if kerberos is enabled in your hadoop system, provide the info below
-
-# KDC server address
-kdc_server =
-# include realm, i.e. admin/admin@EXAMPLE.COM
-admin_principal =
-# admin password for the admin principal; it is used to create the trafodion user's principal and keytab
-kdcadmin_pwd =

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/db_install.py
----------------------------------------------------------------------
diff --git a/install/python-installer/db_install.py b/install/python-installer/db_install.py
index 73cacaa..a058c9c 100755
--- a/install/python-installer/db_install.py
+++ b/install/python-installer/db_install.py
@@ -34,13 +34,15 @@ sys.setdefaultencoding("utf-8")
 from optparse import OptionParser
 from glob import glob
 from collections import defaultdict
-import wrapper
 try:
     from prettytable import PrettyTable
 except ImportError:
     print 'Python module prettytable is not found. Install python-prettytable first.'
     exit(1)
-from common import *
+from scripts import wrapper
+from scripts.common import DEF_PORT_FILE, DBCFG_FILE, USER_PROMPT_FILE, DBCFG_TMP_FILE, \
+                           INSTALLER_LOC, Remote, Version, ParseHttp, ParseInI, ParseJson, \
+                           http_start, http_stop, format_output, err_m, expNumRe
 
 # init global cfgs for user input
 cfgs = defaultdict(str)
@@ -52,8 +54,9 @@ class HadoopDiscover(object):
         self.users = {}
         self.cluster_name = cluster_name
         self.hg = ParseHttp(user, pwd)
-        self.v1_url = '%s/api/v1/clusters' % url
-        self.v6_url = '%s/api/v6/clusters' % url
+        self.url = url
+        self.v1_url = '%s/api/v1/clusters' % self.url
+        self.v6_url = '%s/api/v6/clusters' % self.url
         self.cluster_url = '%s/%s' % (self.v1_url, cluster_name.replace(' ', '%20'))
         self._get_distro()
         self._check_version()
@@ -174,6 +177,18 @@ class HadoopDiscover(object):
         hdp = self.hg.get('%s/services/HBASE/components/HBASE_REGIONSERVER' % self.cluster_url)
         self.rsnodes = [c['HostRoles']['host_name'] for c in hdp['host_components']]
 
+    def get_hbase_lib_path(self):
+        if 'CDH' in self.distro:
+            parcel_config = self.hg.get('%s/api/v6/cm/allHosts/config' % self.url)
+            # parcel dir exists
+            if parcel_config['items'] and parcel_config['items'][0]['name'] == 'parcels_directory':
+                hbase_lib_path = parcel_config['items'][0]['value'] + '/CDH/lib/hbase/lib'
+            else:
+                hbase_lib_path = '/usr/lib/hbase/lib'
+        elif 'HDP' in self.distro:
+            hbase_lib_path = '/usr/hdp/current/hbase-regionserver/lib'
+
+        return hbase_lib_path
 
 class UserInput(object):
     def __init__(self, options, pwd):
@@ -184,6 +199,7 @@ class UserInput(object):
         isYN = self.in_data[name].has_key('isYN')
         isdigit = self.in_data[name].has_key('isdigit')
         isexist = self.in_data[name].has_key('isexist')
+        isfile = self.in_data[name].has_key('isfile')
         isremote_exist = self.in_data[name].has_key('isremote_exist')
         isIP = self.in_data[name].has_key('isIP')
         isuser = self.in_data[name].has_key('isuser')
@@ -201,6 +217,9 @@ class UserInput(object):
             elif isexist:
                 if not os.path.exists(answer):
                     log_err('%s path \'%s\' doesn\'t exist' % (name, answer))
+            elif isfile:
+                if not os.path.isfile(answer):
+                    log_err('%s file \'%s\' doesn\'t exist' % (name, answer))
             elif isremote_exist:
                 hosts = cfgs['node_list'].split(',')
                 remotes = [Remote(host, pwd=self.pwd) for host in hosts]
@@ -293,6 +312,7 @@ class UserInput(object):
             pt.align[item] = 'l'
 
         for key, value in sorted(cfgs.items()):
+            # only display values that came from user input
             if self.in_data.has_key(key) and value:
                 if self.in_data[key].has_key('ispasswd'): continue
                 pt.add_row([key, value])
@@ -305,7 +325,7 @@ class UserInput(object):
 
 def log_err(errtext):
     # save tmp config files
-    tp = ParseInI(DBCFG_TMP_FILE)
+    tp = ParseInI(DBCFG_TMP_FILE, 'dbconfigs')
     tp.save(cfgs)
     err_m(errtext)
 
@@ -320,7 +340,7 @@ def user_input(options, prompt_mode=True, pwd=''):
 
     # load from temp config file if in prompt mode
     if os.path.exists(DBCFG_TMP_FILE) and prompt_mode == True:
-        tp = ParseInI(DBCFG_TMP_FILE)
+        tp = ParseInI(DBCFG_TMP_FILE, 'dbconfigs')
         cfgs = tp.load()
 
     u = UserInput(options, pwd)
@@ -376,16 +396,21 @@ def user_input(options, prompt_mode=True, pwd=''):
             try:
                 cluster_name = content['items'][0]['name']
             except (IndexError, KeyError):
-                cluster_name = content['items'][0]['Clusters']['cluster_name']
+                try:
+                    cluster_name = content['items'][0]['Clusters']['cluster_name']
+                except (IndexError, KeyError):
+                    log_err('Failed to get cluster info from management url')
 
-        discover = HadoopDiscover(cfgs['mgr_user'], cfgs['mgr_pwd'], cfgs['mgr_url'], cluster_name)
-        rsnodes = discover.get_rsnodes()
-        hadoop_users = discover.get_hadoop_users()
 
-        cfgs['distro'] = discover.distro
-        cfgs['hbase_service_name'] = discover.get_hbase_srvname()
-        cfgs['hdfs_service_name'] = discover.get_hdfs_srvname()
-        cfgs['zookeeper_service_name'] = discover.get_zookeeper_srvname()
+        hadoop_discover = HadoopDiscover(cfgs['mgr_user'], cfgs['mgr_pwd'], cfgs['mgr_url'], cluster_name)
+        rsnodes = hadoop_discover.get_rsnodes()
+        hadoop_users = hadoop_discover.get_hadoop_users()
+
+        cfgs['distro'] = hadoop_discover.distro
+        cfgs['hbase_lib_path'] = hadoop_discover.get_hbase_lib_path()
+        cfgs['hbase_service_name'] = hadoop_discover.get_hbase_srvname()
+        cfgs['hdfs_service_name'] = hadoop_discover.get_hdfs_srvname()
+        cfgs['zookeeper_service_name'] = hadoop_discover.get_zookeeper_srvname()
 
         cfgs['cluster_name'] = cluster_name.replace(' ', '%20')
         cfgs['hdfs_user'] = hadoop_users['hdfs_user']
@@ -398,12 +423,21 @@ def user_input(options, prompt_mode=True, pwd=''):
         rc = os.system('ping -c 1 %s >/dev/null 2>&1' % node)
         if rc: log_err('Cannot ping %s, please check network connection and /etc/hosts' % node)
 
+    # set some system default configs
+    cfgs['config_created_date'] = time.strftime('%Y/%m/%d %H:%M %Z')
+    cfgs['traf_user'] = 'trafodion'
+    if apache:
+        cfgs['hbase_xml_file'] = cfgs['hbase_home'] + '/conf/hbase-site.xml'
+        cfgs['hdfs_xml_file'] = cfgs['hadoop_home'] + '/etc/hadoop/hdfs-site.xml'
+    else:
+        cfgs['hbase_xml_file'] = '/etc/hbase/conf/hbase-site.xml'
+
     ### discover system settings, return a dict
-    discover_results = wrapper.run(cfgs, options, mode='discover', pwd=pwd)
+    system_discover = wrapper.run(cfgs, options, mode='discover', pwd=pwd)
 
     # check discover results, return an error if it fails on any single node
     need_java_home = 0
-    for result in discover_results:
+    for result in system_discover:
         host, content = result.items()[0]
         content_dict = json.loads(content)
 
@@ -420,8 +454,11 @@ def user_input(options, prompt_mode=True, pwd=''):
             log_err('HBase is not found')
         if content_dict['hbase'] == 'N/S':
             log_err('HBase version is not supported')
-
-        if content_dict['secure_hadoop'] == 'kerberos':
+        if content_dict['hadoop_authorization'] == 'true':
+            log_err('HBase authorization is enabled; please disable it before installing Trafodion')
+        if content_dict['home_dir']: # trafodion user exists
+            cfgs['home_dir'] = content_dict['home_dir']
+        if content_dict['hadoop_authentication'] == 'kerberos':
             cfgs['secure_hadoop'] = 'Y'
         else:
             cfgs['secure_hadoop'] = 'N'
@@ -432,7 +469,8 @@ def user_input(options, prompt_mode=True, pwd=''):
             log_err('repodata directory not found, this is not a valid repository directory')
         cfgs['offline_mode'] = 'Y'
         cfgs['repo_ip'] = socket.gethostbyname(socket.gethostname())
-        cfgs['repo_port'] = '9900'
+        ports = ParseInI(DEF_PORT_FILE, 'ports').load()
+        cfgs['repo_http_port'] = ports['repo_http_port']
 
     pkg_list = ['apache-trafodion']
     # find tar in installer folder, if more than one found, use the first one
@@ -443,6 +481,7 @@ def user_input(options, prompt_mode=True, pwd=''):
             break
 
     g('traf_package')
+    cfgs['req_java8'] = 'N'
 
     # get basename and version from tar filename
     try:
@@ -451,11 +490,9 @@ def user_input(options, prompt_mode=True, pwd=''):
     except:
         log_err('Invalid package tar file')
 
-    #if float(cfgs['traf_version'][:3]) >= 2.2:
-    #    cfgs['req_java8'] = 'Y'
-    #else:
-    #    cfgs['req_java8'] = 'N'
-
+    if not cfgs['traf_dirname']:
+        cfgs['traf_dirname'] = '%s-%s' % (cfgs['traf_basename'], cfgs['traf_version'])
+    g('traf_dirname')
     g('traf_pwd')
     g('dcs_cnt_per_node')
     g('scratch_locs')
@@ -511,16 +548,6 @@ def user_input(options, prompt_mode=True, pwd=''):
         if not cfgs['java_home']:
             cfgs['java_home'] = java_home
 
-    # set other config to cfgs
-    if apache:
-        cfgs['hbase_xml_file'] = cfgs['hbase_home'] + '/conf/hbase-site.xml'
-        cfgs['hdfs_xml_file'] = cfgs['hadoop_home'] + '/etc/hadoop/hdfs-site.xml'
-    else:
-        cfgs['hbase_xml_file'] = '/etc/hbase/conf/hbase-site.xml'
-
-    cfgs['req_java8'] = 'N'
-    cfgs['traf_user'] = 'trafodion'
-    cfgs['config_created_date'] = time.strftime('%Y/%m/%d %H:%M %Z')
 
     if not silent:
         u.notify_user()
@@ -581,7 +608,7 @@ def main():
         pwd = ''
 
     # not specified config file and default config file doesn't exist either
-    p = ParseInI(config_file)
+    p = ParseInI(config_file, 'dbconfigs')
     if options.build or (not os.path.exists(config_file)):
         if options.build: format_output('DryRun Start')
         user_input(options, prompt_mode=True, pwd=pwd)
@@ -601,7 +628,7 @@ def main():
         cfgs['upgrade'] = 'Y'
 
     if options.offline:
-        http_start(cfgs['local_repo_dir'], cfgs['repo_port'])
+        http_start(cfgs['local_repo_dir'], cfgs['repo_http_port'])
     else:
         cfgs['offline_mode'] = 'N'
 
@@ -635,7 +662,7 @@ if __name__ == "__main__":
     try:
         main()
     except (KeyboardInterrupt, EOFError):
-        tp = ParseInI(DBCFG_TMP_FILE)
+        tp = ParseInI(DBCFG_TMP_FILE, 'dbconfigs')
         tp.save(cfgs)
         http_stop()
         print '\nAborted...'

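The ParseInI calls above now take a section name ('dbconfigs'), which implies the saved config file is a sectioned INI file rather than a flat key/value list. A minimal standalone sketch of that load/save behavior, assuming Python 2's ConfigParser; the helper names save_cfgs/load_cfgs are illustrative, the real implementation lives in scripts/common.py:

    from ConfigParser import ConfigParser

    def save_cfgs(cfg_file, section, cfgs):
        # write all collected configs under a named section, e.g. [dbconfigs]
        parser = ConfigParser()
        parser.add_section(section)
        for key, value in sorted(cfgs.items()):
            parser.set(section, key, value)
        with open(cfg_file, 'w') as f:
            parser.write(f)

    def load_cfgs(cfg_file, section):
        # read the same section back into a plain dict
        parser = ConfigParser()
        parser.read(cfg_file)
        return dict(parser.items(section))

Keeping everything under a named section lets other files reuse the same format, as the 'ports' section loaded from DEF_PORT_FILE above does.
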
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/dcs_setup.py
----------------------------------------------------------------------
diff --git a/install/python-installer/dcs_setup.py b/install/python-installer/dcs_setup.py
deleted file mode 100755
index 3a196db..0000000
--- a/install/python-installer/dcs_setup.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-### this script should be run on all nodes with trafodion user ###
-
-import os
-import sys
-import json
-from common import ParseXML, append_file, write_file, mod_file, cmd_output, run_cmd, err
-
-def run():
-    dbcfgs = json.loads(dbcfgs_json)
-
-    TRAF_HOME = os.environ['TRAF_HOME']
-    TRAF_VER = dbcfgs['traf_version']
-    HBASE_XML_FILE = dbcfgs['hbase_xml_file']
-
-    DCS_INSTALL_ENV = 'export DCS_INSTALL_DIR=%s/dcs-%s' % (TRAF_HOME, TRAF_VER)
-    REST_INSTALL_ENV = 'export REST_INSTALL_DIR=%s/rest-%s' % (TRAF_HOME, TRAF_VER)
-
-    DCS_CONF_DIR = '%s/dcs-%s/conf' % (TRAF_HOME, TRAF_VER)
-    DCS_SRV_FILE = DCS_CONF_DIR + '/servers'
-    DCS_MASTER_FILE = DCS_CONF_DIR + '/master'
-    DCS_BKMASTER_FILE = DCS_CONF_DIR + '/backup-masters'
-    DCS_ENV_FILE = DCS_CONF_DIR + '/dcs-env.sh'
-    DCS_SITE_FILE = DCS_CONF_DIR + '/dcs-site.xml'
-    REST_SITE_FILE = '%s/rest-%s/conf/rest-site.xml' % (TRAF_HOME, TRAF_VER)
-    TRAFCI_FILE = TRAF_HOME + '/trafci/bin/trafci'
-    SQENV_FILE = TRAF_HOME + '/sqenvcom.sh'
-
-    ### dcs setting ###
-    # servers
-    nodes = dbcfgs['node_list'].split(',')
-    dcs_cnt = dbcfgs['dcs_cnt_per_node']
-    dcs_servers = ''
-    for node in nodes:
-        dcs_servers += '%s %s\n' % (node, dcs_cnt)
-
-    write_file(DCS_SRV_FILE, dcs_servers)
-
-    ### modify dcs config files ###
-    # modify master
-    dcs_master = nodes[0]
-    append_file(DCS_MASTER_FILE, dcs_master)
-
-    # modify sqenvcom.sh
-    append_file(SQENV_FILE, DCS_INSTALL_ENV)
-    append_file(SQENV_FILE, REST_INSTALL_ENV)
-
-    # modify dcs-env.sh
-    mod_file(DCS_ENV_FILE, {'.*DCS_MANAGES_ZK=.*':'export DCS_MANAGES_ZK=false'})
-
-    # modify trafci
-    mod_file(TRAFCI_FILE, {'HNAME=.*':'HNAME=%s:23400' % dcs_master})
-
-    # modify dcs-site.xml
-    net_interface = cmd_output('netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'').strip()
-    hb = ParseXML(HBASE_XML_FILE)
-    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
-    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
-
-    p = ParseXML(DCS_SITE_FILE)
-    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
-    p.add_property('dcs.zookeeper.quorum', zk_hosts)
-    p.add_property('dcs.dns.interface', net_interface)
-
-    if dbcfgs['dcs_ha'] == 'Y':
-        dcs_floating_ip = dbcfgs['dcs_floating_ip']
-        dcs_backup_nodes = dbcfgs['dcs_backup_nodes']
-        p.add_property('dcs.master.floating.ip', 'true')
-        p.add_property('dcs.master.floating.ip.external.interface', net_interface)
-        p.add_property('dcs.master.floating.ip.external.ip.address', dcs_floating_ip)
-        p.rm_property('dcs.dns.interface')
-
-        # modify backup_master
-        write_file(DCS_BKMASTER_FILE, dcs_backup_nodes)
-
-    p.write_xml()
-
-    ### rest setting ###
-    p = ParseXML(REST_SITE_FILE)
-    p.add_property('rest.zookeeper.property.clientPort', zk_port)
-    p.add_property('rest.zookeeper.quorum', zk_hosts)
-    p.write_xml()
-
-    ### run sqcertgen ###
-    run_cmd('sqcertgen')
-
-# main
-try:
-    dbcfgs_json = sys.argv[1]
-except IndexError:
-    err('No db config found')
-run()

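The relocated dcs_setup.py builds dcs-site.xml and rest-site.xml through the ParseXML helper from scripts/common.py. A rough sketch of the hadoop-style <property> editing it performs, written directly against xml.etree; add_property here is an illustrative stand-in, not the helper's actual implementation:

    import xml.etree.ElementTree as ET

    def add_property(xml_file, name, value):
        # hadoop-style config files are a flat <configuration> of <property> entries
        tree = ET.parse(xml_file)
        root = tree.getroot()
        prop = ET.SubElement(root, 'property')
        ET.SubElement(prop, 'name').text = name
        ET.SubElement(prop, 'value').text = value
        tree.write(xml_file)

    # e.g. point DCS at the zookeeper quorum read from hbase-site.xml:
    # add_property('dcs-site.xml', 'dcs.zookeeper.quorum', zk_hosts)
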
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/discovery.py
----------------------------------------------------------------------
diff --git a/install/python-installer/discovery.py b/install/python-installer/discovery.py
index 9f2d66e..fe7839d 100755
--- a/install/python-installer/discovery.py
+++ b/install/python-installer/discovery.py
@@ -32,8 +32,8 @@ try:
 except ImportError:
     print 'Python module prettytable is not found. Install python-prettytable first.'
     exit(1)
-from common import err_m, err, ParseInI, expNumRe, format_output, DBCFG_FILE
-import wrapper
+from scripts.common import err_m, err, ParseInI, expNumRe, format_output, DBCFG_FILE
+from scripts import wrapper
 
 
 def get_options():
@@ -46,9 +46,9 @@ def get_options():
     parser.add_option("-u", "--remote-user", dest="user", metavar="USER",
                       help="Specify ssh login user for remote server, \
                             if not provided, use current login user as default.")
-    parser.add_option("--enable-pass", action="store_true", dest="pwd", default=True,
-                      help="Not Prompt SSH login password for remote hosts.")
-
+    parser.add_option("--enable-pwd", action="store_true", dest="pwd", default=False,
+                      help="Prompt SSH login password for remote hosts. \
+                            If set, \'sshpass\' tool is required.")
     (options, args) = parser.parse_args()
     return options
 
@@ -112,7 +112,7 @@ def main():
         pwd = ''
 
     if os.path.exists(config_file):
-        cfgs = ParseInI(config_file).load()
+        cfgs = ParseInI(config_file, 'dbconfigs').load()
     else:
         node_lists = expNumRe(raw_input('Enter list of Nodes separated by comma, support numeric RE, i.e. n[01-12]: '))
 

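The reworked --enable-pwd option now defaults to off and, when given, prompts for an SSH password that the installer feeds to remote commands, hence the new note about the 'sshpass' tool. A hedged sketch of that style of remote call; remote_cmd is an illustrative name, not the Remote class in scripts/common.py:

    import getpass
    import subprocess

    def remote_cmd(host, cmd, user='', pwd=''):
        # run one command over ssh, going through sshpass when a password is set
        target = '%s@%s' % (user, host) if user else host
        ssh_cmd = ['ssh', '-oStrictHostKeyChecking=no', target, cmd]
        if pwd:
            ssh_cmd = ['sshpass', '-p', pwd] + ssh_cmd  # requires sshpass installed
        return subprocess.call(ssh_cmd)

    # prompt once and reuse the password for every host, as --enable-pwd does:
    # pwd = getpass.getpass('Enter remote host SSH password: ')
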
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/hadoop_mods.py
----------------------------------------------------------------------
diff --git a/install/python-installer/hadoop_mods.py b/install/python-installer/hadoop_mods.py
deleted file mode 100755
index 168a81f..0000000
--- a/install/python-installer/hadoop_mods.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-### this script should be run on local node ###
-
-import time
-import sys
-import json
-from common import ParseHttp, ParseJson, MODCFG_FILE, err
-
-try:
-    dbcfgs_json = sys.argv[1]
-except IndexError:
-    err('No db config found')
-
-dbcfgs = json.loads(dbcfgs_json)
-modcfgs = ParseJson(MODCFG_FILE).load()
-
-MOD_CFGS = modcfgs['MOD_CFGS']
-HBASE_MASTER_CONFIG = modcfgs['HBASE_MASTER_CONFIG']
-HBASE_RS_CONFIG = modcfgs['HBASE_RS_CONFIG']
-HDFS_CONFIG = modcfgs['HDFS_CONFIG']
-ZK_CONFIG = modcfgs['ZK_CONFIG']
-
-CLUSTER_URL_PTR = '%s/api/v1/clusters/%s'
-RESTART_URL_PTR = CLUSTER_URL_PTR + '/commands/restart'
-RESTART_SRV_URL_PTR = CLUSTER_URL_PTR + '/services/%s/commands/restart'
-SRVCFG_URL_PTR = CLUSTER_URL_PTR + '/services/%s/config'
-RSGRP_BASEURL_PTR = '%s/api/v6/clusters/%s/services/%s/roleConfigGroups'
-DEPLOY_CFG_URL_PTR = '%s/api/v6/clusters/%s/commands/deployClientConfig'
-CMD_STAT_URL_PTR = '%s/api/v1/commands/%s'
-
-class CDHMod(object):
-    """ Modify CDH configs for trafodion and restart CDH services """
-    def __init__(self, user, passwd, url, cluster_name):
-        self.url = url
-        self.cluster_name = cluster_name
-        self.p = ParseHttp(user, passwd)
-
-    def __retry_check(self, cid, maxcnt, interval, msg):
-        stat_url = CMD_STAT_URL_PTR % (self.url, cid)
-        stat = self.p.get(stat_url)
-        retry_cnt = 0
-        while not (stat['success'] is True and stat['active'] is False):
-            retry_cnt += 1
-            flush_str = '.' * retry_cnt
-            print '\rCheck CDH services %s status (timeout: %dmin) %s' % (msg, maxcnt*interval/60, flush_str),
-            sys.stdout.flush()
-            time.sleep(interval)
-            stat = self.p.get(stat_url)
-            if retry_cnt == maxcnt: return False
-        return True
-
-    def mod(self):
-        hdfs_service = dbcfgs['hdfs_service_name']
-        hbase_service = dbcfgs['hbase_service_name']
-        zk_service = dbcfgs['zookeeper_service_name']
-        services = {hdfs_service:HDFS_CONFIG, hbase_service:HBASE_MASTER_CONFIG, zk_service:ZK_CONFIG}
-
-        for srv, cfg in services.iteritems():
-            srvcfg_url = SRVCFG_URL_PTR % (self.url, self.cluster_name, srv)
-            self.p.put(srvcfg_url, cfg)
-
-        # set configs in each regionserver group
-        rsgrp_baseurl = RSGRP_BASEURL_PTR % (self.url, self.cluster_name, hbase_service)
-        rscfg = self.p.get(rsgrp_baseurl)
-        rsgrp_urls = ['%s/%s/config' % (rsgrp_baseurl, r['name']) for r in rscfg['items'] if r['roleType'] == 'REGIONSERVER']
-
-        for rsgrp_url in rsgrp_urls:
-            self.p.put(rsgrp_url, HBASE_RS_CONFIG)
-
-    def restart(self):
-        restart_url = RESTART_URL_PTR % (self.url, self.cluster_name)
-        deploy_cfg_url = DEPLOY_CFG_URL_PTR % (self.url, self.cluster_name)
-
-        print 'Restarting CDH services ...'
-        rc1 = self.p.post(restart_url)
-        if self.__retry_check(rc1['id'], 40, 15, 'restart'):
-            print 'Restart CDH successfully!'
-        else:
-            err('Failed to restart CDH, max retry count reached')
-
-        rc2 = self.p.post(deploy_cfg_url)
-        if self.__retry_check(rc2['id'], 30, 10, 'deploy'):
-            print 'Deploy client config successfully!'
-        else:
-            err('Failed to deploy CDH client config, max retry count reached')
-
-
-class HDPMod(object):
-    """ Modify HDP configs for trafodion and restart HDP services """
-    def __init__(self, user, passwd, url, cluster_name):
-        self.url = url
-        self.cluster_name = cluster_name
-        self.p = ParseHttp(user, passwd, json_type=False)
-
-    def mod(self):
-        cluster_url = CLUSTER_URL_PTR % (self.url, self.cluster_name)
-        desired_cfg_url = cluster_url + '?fields=Clusters/desired_configs'
-        cfg_url = cluster_url + '/configurations?type={0}&tag={1}'
-        desired_cfg = self.p.get(desired_cfg_url)
-
-        for config_type in MOD_CFGS.keys():
-            desired_tag = desired_cfg['Clusters']['desired_configs'][config_type]['tag']
-            current_cfg = self.p.get(cfg_url.format(config_type, desired_tag))
-            tag = 'version' + str(int(time.time() * 1000000))
-            new_properties = current_cfg['items'][0]['properties']
-            new_properties.update(MOD_CFGS[config_type])
-            config = {
-                'Clusters': {
-                    'desired_config': {
-                        'type': config_type,
-                        'tag': tag,
-                        'properties': new_properties
-                    }
-                }
-            }
-            self.p.put(cluster_url, config)
-
-
-    def restart(self):
-        srv_baseurl = CLUSTER_URL_PTR % (self.url, self.cluster_name) + '/services/'
-        srvs = ['HBASE', 'ZOOKEEPER', 'HDFS']
-
-        # Stop
-        print 'Restarting HDP services ...'
-        for srv in srvs:
-            srv_url = srv_baseurl + srv
-            config = {'RequestInfo': {'context' :'Stop %s services' % srv}, 'ServiceInfo': {'state' : 'INSTALLED'}}
-            rc = self.p.put(srv_url, config)
-
-            # check stop status
-            if rc:
-                stat = self.p.get(srv_url)
-
-                retry_cnt, maxcnt, interval = 0, 30, 5
-                while stat['ServiceInfo']['state'] != 'INSTALLED':
-                    retry_cnt += 1
-                    flush_str = '.' * retry_cnt
-                    print '\rCheck HDP service %s stop status (timeout: %dmin) %s' % (srv, maxcnt*interval/60, flush_str),
-                    sys.stdout.flush()
-                    time.sleep(interval)
-                    stat = self.p.get(srv_url)
-                    if retry_cnt == maxcnt: err('Failed to stop HDP service %s, timeout' % srv)
-                # wrap line
-                print
-            else:
-                print 'HDP service %s had already been stopped' % srv
-
-        time.sleep(5)
-        # Start
-        config = {'RequestInfo': {'context' :'Start All services'}, 'ServiceInfo': {'state' : 'STARTED'}}
-        rc = self.p.put(srv_baseurl, config)
-
-        # check start status
-        if rc:
-            result_url = rc['href']
-            stat = self.p.get(result_url)
-            retry_cnt, maxcnt, interval = 0, 120, 5
-            while stat['Requests']['request_status'] != 'COMPLETED':
-                retry_cnt += 1
-                flush_str = '.' * retry_cnt
-                print '\rCheck HDP services start status (timeout: %dmin) %s' % (maxcnt*interval/60, flush_str),
-                sys.stdout.flush()
-                time.sleep(interval)
-                stat = self.p.get(result_url)
-                if retry_cnt == maxcnt: err('Failed to start all HDP services')
-            print 'HDP services started successfully!'
-        else:
-            print 'HDP services had already been started'
-
-def run():
-    if 'CDH' in dbcfgs['distro']:
-        cdh = CDHMod(dbcfgs['mgr_user'], dbcfgs['mgr_pwd'], dbcfgs['mgr_url'], dbcfgs['cluster_name'])
-        cdh.mod()
-        cdh.restart()
-    elif 'HDP' in dbcfgs['distro']:
-        hdp = HDPMod(dbcfgs['mgr_user'], dbcfgs['mgr_pwd'], dbcfgs['mgr_url'], dbcfgs['cluster_name'])
-        hdp.mod()
-        hdp.restart()
-
-# main
-run()

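Both the CDH and HDP paths in the relocated hadoop_mods.py share the same polling idiom: fire a restart command, then poll its status URL with a growing dot line until it finishes or a retry budget runs out. Reduced to a standalone sketch, where check_done stands in for the ParseHttp GET plus success test:

    import sys
    import time

    def poll_until(check_done, maxcnt, interval, msg):
        # call check_done() every `interval` seconds, up to `maxcnt` times
        for retry_cnt in range(1, maxcnt + 1):
            if check_done():
                return True
            print '\rCheck %s status (timeout: %dmin) %s' % (msg, maxcnt * interval / 60, '.' * retry_cnt),
            sys.stdout.flush()
            time.sleep(interval)
        return False
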
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/hdfs_cmds.py
----------------------------------------------------------------------
diff --git a/install/python-installer/hdfs_cmds.py b/install/python-installer/hdfs_cmds.py
deleted file mode 100755
index dc08b7e..0000000
--- a/install/python-installer/hdfs_cmds.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-
-### this script should be run on first node with trafodion user ###
-
-import os
-import sys
-import json
-from common import err, run_cmd, cmd_output, run_cmd_as_user
-
-def run():
-    hdfs_bin = '/usr/bin/hdfs'
-
-    dbcfgs = json.loads(dbcfgs_json)
-    DISTRO = dbcfgs['distro']
-
-    if 'CDH' in DISTRO:
-        parcel_lib = '/opt/cloudera/parcels/CDH/lib/hbase/lib'
-        if os.path.exists(parcel_lib): hdfs_bin = '/opt/cloudera/parcels/CDH/bin/hdfs'
-    elif 'APACHE' in DISTRO:
-        hdfs_bin = dbcfgs['hadoop_home'] + '/bin/hdfs'
-
-    traf_loc = '/user/trafodion'
-    traf_user = dbcfgs['traf_user']
-    hdfs_user = dbcfgs['hdfs_user']
-    hbase_user = dbcfgs['hbase_user']
-
-    run_cmd_as_user(hdfs_user, '%s dfsadmin -safemode wait' % hdfs_bin)
-    run_cmd_as_user(hdfs_user, '%s dfs -mkdir -p %s/{trafodion_backups,bulkload,lobs} /hbase/archive /hbase-staging' % (hdfs_bin, traf_loc))
-    run_cmd_as_user(hdfs_user, '%s dfs -chown -R %s:%s /hbase/archive /hbase-staging' % (hdfs_bin, hbase_user, hbase_user))
-    run_cmd_as_user(hdfs_user, '%s dfs -chown -R %s:%s %s/{trafodion_backups,bulkload,lobs}' % (hdfs_bin, traf_user, traf_user, traf_loc))
-    run_cmd_as_user(hdfs_user, '%s dfs -setfacl -R -m user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user))
-    run_cmd_as_user(hdfs_user, '%s dfs -setfacl -R -m default:user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user))
-    run_cmd_as_user(hdfs_user, '%s dfs -setfacl -R -m mask::rwx /hbase/archive' % hdfs_bin)
-
-    # Grant all privileges to the Trafodion principal in HBase
-    if dbcfgs['secure_hadoop'] == 'Y':
-        run_cmd('grant "%s", "RWXC" | sudo -u %s hbase shell > /tmp/hbase_shell.out' % (traf_user, hbase_user))
-        has_err = cmd_output('grep -c ERROR /tmp/hbase_shell.out')
-        if int(has_err):
-            err('Failed to grant HBase privileges to %s' % traf_user)
-        run_cmd('rm /tmp/hbase_shell.out')
-# main
-try:
-    dbcfgs_json = sys.argv[1]
-except IndexError:
-    err('No db config found')
-run()

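Every HDFS step in the relocated hdfs_cmds.py goes through run_cmd_as_user so the command executes as the hdfs (or hbase) service user. A minimal approximation, assuming passwordless sudo for the installing user; the real helper lives in scripts/common.py:

    import subprocess

    def run_cmd_as_user(user, cmd):
        # run a shell command as another user and abort this step on failure
        rc = subprocess.call(['sudo', '-u', user, 'sh', '-c', cmd])
        if rc != 0:
            raise SystemExit('Failed to run command as user %s: %s' % (user, cmd))

    # e.g. wait for HDFS to leave safe mode before creating Trafodion dirs:
    # run_cmd_as_user('hdfs', '/usr/bin/hdfs dfsadmin -safemode wait')
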
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/2886c023/install/python-installer/mod_cfgs.json
----------------------------------------------------------------------
diff --git a/install/python-installer/mod_cfgs.json b/install/python-installer/mod_cfgs.json
deleted file mode 100644
index 822b470..0000000
--- a/install/python-installer/mod_cfgs.json
+++ /dev/null
@@ -1,73 +0,0 @@
-{
-"MOD_CFGS": {
-    "hbase-site": {
-        "hbase.master.distributed.log.splitting": "false",
-        "hbase.snapshot.master.timeoutMillis": "600000",
-        "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation",
-        "hbase.hregion.impl": "org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion",
-        "hbase.regionserver.region.split.policy": "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy",
-        "hbase.snapshot.enabled": "true",
-        "hbase.bulkload.staging.dir": "/hbase-staging",
-        "hbase.regionserver.region.transactional.tlog": "true",
-        "hbase.snapshot.region.timeout": "600000",
-        "hbase.client.scanner.timeout.period": "600000"
-    },
-    "hdfs-site": { "dfs.namenode.acls.enabled": "true" },
-    "zoo.cfg": { "maxClientCnxns": "0" }
-},
-
-"HBASE_MASTER_CONFIG": {
-"roleTypeConfigs" :  [ {
-        "roleType" : "MASTER",
-        "items" : [ {
-                "name" : "hbase_master_config_safety_valve",
-        "value" : "<property>\r\n   <name>hbase.master.distributed.log.splitting</name>\r\n   <value>false</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.master.timeoutMillis</name>\r\n   <value>600000</value>\r\n</property>\r\n"
-                } ]
-    } ]
-},
-
-"HBASE_RS_CONFIG": {
-"items" : [ {
-                "name" : "hbase_coprocessor_region_classes",
-                "value" : "org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation"
-                }, {
-                "name" : "hbase_regionserver_lease_period",
-                "value" : "600000"
-                }, {
-                "name" : "hbase_regionserver_config_safety_valve",
-                "value" : "<property>\r\n   <name>hbase.hregion.impl</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.split.policy</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy</value>\r\n</property>\r\n  <property>\r\n   <name>hbase.snapshot.enabled</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.bulkload.staging.dir</name>\r\n   <value>/hbase-staging</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.transactional.tlog</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.region.timeout</name>\r\n   <value>600000</value>\r\n</property>\r\n "
-                } ]
-},
-
-"HDFS_CONFIG": {
-"roleTypeConfigs" :  [ {
-        "roleType" : "NAMENODE",
-        "items": [ {
-                "name" : "namenode_java_heapsize",
-        "value" : "1073741824"
-                } ]
-   }, {
-        "roleType" : "SECONDARYNAMENODE",
-        "items":[ {
-                "name" : "secondary_namenode_java_heapsize",
-        "value" : "1073741824"
-                } ]
-     } ],
-    "items": [ {
-             "name":"dfs_namenode_acls_enabled",
-             "value":"true"
-             } ]
-},
-
-"ZK_CONFIG":{
-"roleTypeConfigs" :
-    [ {
-        "roleType" : "SERVER",
-        "items":
-           [ {
-           "name"  : "maxClientCnxns",
-           "value" : "0"
-           } ]
-    } ]
-}
-}