Posted to commits@trafodion.apache.org by sv...@apache.org on 2016/11/09 18:55:05 UTC

[1/6] incubator-trafodion git commit: [TRAFODION-1839] Trafodion Installer Evolution

Repository: incubator-trafodion
Updated Branches:
  refs/heads/master d751c96c5 -> 7ba7ee3e5


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/script.json
----------------------------------------------------------------------
diff --git a/install/python-installer/script.json b/install/python-installer/script.json
new file mode 100644
index 0000000..f6191d5
--- /dev/null
+++ b/install/python-installer/script.json
@@ -0,0 +1,87 @@
+{
+"install": [
+    {
+        "script": "traf_check.py",
+        "desc": "Environment Check",
+        "node": "all"
+    },
+    {
+        "script": "copy_files.py",
+        "desc": "Copy Trafodion package file",
+        "node": "local",
+        "req_pwd": "yes"
+    },
+    {
+        "script": "traf_user.py",
+        "desc": "Trafodion user Setup",
+        "node": "all"
+    },
+    {
+        "script": "traf_dep.py",
+        "desc": "Install Trafodion dependencies",
+        "node": "all"
+    },
+    {
+        "script": "traf_package.py",
+        "desc": "Install Trafodion package",
+        "node": "all",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "traf_setup.py",
+        "desc": "Environment Setup",
+        "node": "all"
+    },
+    {
+        "script": "traf_kerberos.py",
+        "desc": "Kerberos Setup",
+        "node": "all"
+    },
+    {
+        "script": "dcs_setup.py",
+        "desc": "DCS/REST Setup",
+        "node": "all",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "traf_ldap.py",
+        "desc": "LDAP Security Setup",
+        "node": "all",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "hadoop_mods.py",
+        "desc": "Hadoop modification and restart",
+        "node": "local"
+    },
+    {
+        "script": "apache_mods.py",
+        "desc": "Hadoop modification and restart",
+        "node": "all"
+    },
+    {
+        "script": "hdfs_cmds.py",
+        "desc": "Set permission of HDFS folder for Trafodion user",
+        "node": "first_rs"
+    },
+    {
+        "script": "traf_sqconfig.py",
+        "desc": "Sqconfig Setup",
+        "node": "first",
+        "run_as_traf": "yes"
+    },
+    {
+        "script": "traf_start.py",
+        "desc": "Start Trafodion",
+        "node": "first",
+        "run_as_traf": "yes"
+    }
+],
+"discover": [
+    {
+        "script": "traf_discover.py",
+        "desc": "Environment Discover",
+        "node": "all"
+    }
+]
+}
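
Each entry above names a sub-script, a description, where it runs ("all"
nodes, the "local" installer node, the "first" node, or the "first_rs"
RegionServer node), and the optional flags "run_as_traf" and "req_pwd".
The wrapper.py script later in this commit drives the dispatch; a minimal
sketch of that loop (simplified from the full implementation):

    import json

    with open('script.json') as f:
        conf = json.load(f)

    for cfg in conf['install']:
        run_user = 'trafodion' if cfg.get('run_as_traf') == 'yes' else ''
        print 'run %s on node(s) [%s] as [%s]: %s' % \
              (cfg['script'], cfg['node'], run_user or 'sudo user', cfg['desc'])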

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_authentication_conf.template
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_authentication_conf.template b/install/python-installer/traf_authentication_conf.template
new file mode 100644
index 0000000..c3add91
--- /dev/null
+++ b/install/python-installer/traf_authentication_conf.template
@@ -0,0 +1,71 @@
+# @@@ START COPYRIGHT @@@
+# #
+# # Licensed to the Apache Software Foundation (ASF) under one
+# # or more contributor license agreements.  See the NOTICE file
+# # distributed with this work for additional information
+# # regarding copyright ownership.  The ASF licenses this file
+# # to you under the Apache License, Version 2.0 (the
+# # "License"); you may not use this file except in compliance
+# # with the License.  You may obtain a copy of the License at
+# #
+# #   http://www.apache.org/licenses/LICENSE-2.0
+# #
+# # Unless required by applicable law or agreed to in writing,
+# # software distributed under the License is distributed on an
+# # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# # KIND, either express or implied.  See the License for the
+# # specific language governing permissions and limitations
+# # under the License.
+# #
+# # @@@ END COPYRIGHT @@@
+# #
+#
+# This is the template file for Trafodion database authentication
+# directory service configuration.
+#
+# To use authentication in Trafodion, this file must be configured
+# as described below and placed in $MY_SQROOT/sql/scripts and be named
+# .traf_authentication_config.  You must also enable authentication by
+# running the script traf_authentication_setup in $MY_SQROOT/sql/scripts.
+#
+# NOTE: the format of this configuration file is expected to change in the
+# next release of Trafodion.  Backward compatibility is not guaranteed.
+#
+SECTION: Defaults
+  DefaultSectionName: local
+  RefreshTime: 1800
+  TLS_CACERTFilename: {{ ldap_certpath }}
+SECTION: local
+# If one or more of the LDAPHostName values is a load balancing host, list
+# the name(s) here, one name: value pair for each host.
+  LoadBalanceHostName:
+
+# One or more identically configured hosts must be specified here,
+# one name: value pair for each host.
+  LdapHostname: {{ ldap_hosts }}
+
+# Default is port 389, change if using 636 or any other port
+  LdapPort: {{ ldap_port }}
+
+# Must specify one or more unique identifiers, one name: value pair for each
+  UniqueIdentifier: {{ ldap_identifiers }}
+
+# If the configured LDAP server requires a username and password
+# to perform name lookup, provide those here.
+  LDAPSearchDN: {{ ldap_user }}
+  LDAPSearchPwd: {{ ldap_pwd }}
+
+# If the configured LDAP server requires TLS (1) or SSL (2), update this value
+  LDAPSSL: {{ ldap_encrypt }}
+
+# Default timeout values in seconds
+  LDAPNetworkTimeout: 30
+  LDAPTimeout: 30
+  LDAPTimeLimit: 30
+
+# Default values for retry logic algorithm
+  RetryCount: 5
+  RetryDelay: 2
+  PreserveConnection: No
+  ExcludeBadHosts: Yes
+  MaxExcludeListSize: 3

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_check.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_check.py b/install/python-installer/traf_check.py
new file mode 100755
index 0000000..31d62a9
--- /dev/null
+++ b/install/python-installer/traf_check.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with sudo user ###
+
+import re
+import json
+import sys
+import os
+from common import run_cmd, cmd_output, err, Version
+
+class Check(object):
+    """ check system envs """
+
+    def __init__(self, dbcfgs_json):
+        self.dbcfgs = json.loads(dbcfgs_json)
+        self.version = Version()
+
+    def check_sudo(self):
+        """ check sudo access """
+        run_cmd('sudo -n echo -n "check sudo access" > /dev/null 2>&1')
+
+    def check_hbase_xml(self):
+        """ check if hbase-site.xml file exists """
+        hbase_xml_file = self.dbcfgs['hbase_xml_file']
+        if not os.path.exists(hbase_xml_file):
+            err('HBase xml file is not found')
+
+    def check_java(self):
+        """ check JDK version """
+        jdk_path = self.dbcfgs['java_home']
+        jdk_ver = cmd_output('%s/bin/javac -version' % jdk_path)
+        try:
+            jdk_ver, sub_ver = re.search(r'javac (\d\.\d)\.\d_(\d+)', jdk_ver).groups()
+        except AttributeError:
+            err('No JDK found')
+
+        if self.dbcfgs['req_java8'] == 'Y': # only allow JDK1.8
+            support_java = '1.8'
+        else:
+            support_java = self.version.get_version('java')
+
+        if jdk_ver == '1.7' and int(sub_ver) < 65:
+            err('Unsupported JDK1.7 version, sub version should be higher than 65')
+        if jdk_ver not in support_java:
+            err('Unsupported JDK version %s, supported version: %s' % (jdk_ver, support_java))
+
+    #def check_scratch_loc(self):
+    #    """ check if scratch file folder exists """
+    #    scratch_locs = self.dbcfgs['scratch_locs'].split(',')
+    #    for loc in scratch_locs:
+    #        if not os.path.exists(loc):
+    #            err('Scratch file location \'%s\' doesn\'t exist' % loc)
+
+def run():
+    PREFIX = 'check_'
+    check = Check(dbcfgs_json)
+
+    # invoke all check_* methods found by introspection
+    [getattr(check, m)() for m in dir(check) if m.startswith(PREFIX)]
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()
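
The check_java method above parses the output of 'javac -version'; a small
illustration of that parsing (the version string below is hypothetical):

    import re

    jdk_ver_output = 'javac 1.7.0_75'  # hypothetical `javac -version` output
    main_ver, sub_ver = re.search(r'javac (\d\.\d)\.\d_(\d+)', jdk_ver_output).groups()
    print main_ver, sub_ver  # prints: 1.7 75

A sub version below 65 on the 1.7 line is then rejected, matching the check
in check_java.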

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_dep.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_dep.py b/install/python-installer/traf_dep.py
new file mode 100755
index 0000000..76f570c
--- /dev/null
+++ b/install/python-installer/traf_dep.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with sudo user ###
+
+import re
+import os
+import sys
+import json
+import platform
+from common import run_cmd, cmd_output, err
+
+# not used
+EPEL_REPO = """
+[epel]
+name=Extra Packages for Enterprise Linux $releasever - $basearch
+mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch
+enabled=1
+gpgcheck=0
+"""
+
+LOCAL_REPO_PTR = """
+[traflocal]
+baseurl=http://%s:%s/
+enabled=1
+gpgcheck=0
+"""
+
+REPO_FILE = '/etc/yum.repos.d/traflocal.repo'
+
+def run():
+    """ install Trafodion dependencies """
+
+    dbcfgs = json.loads(dbcfgs_json)
+
+    if dbcfgs['offline_mode'] == 'Y':
+        print 'Installing pdsh in offline mode ...'
+
+        # setup temp local repo
+        repo_content = LOCAL_REPO_PTR % (dbcfgs['repo_ip'], dbcfgs['repo_port'])
+        with open(REPO_FILE, 'w') as f:
+            f.write(repo_content)
+
+        run_cmd('yum install -y --disablerepo=\* --enablerepo=traflocal pdsh-rcmd-ssh pdsh')
+    else:
+        pdsh_installed = cmd_output('rpm -qa|grep -c pdsh')
+        if pdsh_installed == '0':
+            release = platform.release()
+            releasever, arch = re.search(r'el(\d).(\w+)', release).groups()
+
+            if releasever == '7':
+                pdsh_pkg = 'http://mirrors.neusoft.edu.cn/epel/7/%s/p/pdsh-2.31-1.el7.%s.rpm' % (arch, arch)
+            elif releasever == '6':
+                pdsh_pkg = 'http://mirrors.neusoft.edu.cn/epel/6/%s/pdsh-2.26-4.el6.%s.rpm' % (arch, arch)
+            else:
+                err('Unsupported Linux version')
+
+            print 'Installing pdsh ...'
+            run_cmd('yum install -y %s' % pdsh_pkg)
+
+    package_list = [
+        'apr',
+        'apr-util',
+        'expect',
+        'gzip',
+        'libiodbc-devel',
+        'lzo',
+        'lzop',
+        'openldap-clients',
+        'perl-DBD-SQLite',
+        'perl-Params-Validate',
+        'perl-Time-HiRes',
+        'sqlite',
+        'snappy',
+        'unixODBC-devel',
+        'unzip'
+    ]
+
+    all_pkg_list = run_cmd('rpm -qa')
+    for pkg in package_list:
+        if pkg in all_pkg_list:
+            print 'Package %s has already been installed' % pkg
+        else:
+            print 'Installing %s ...' % pkg
+            if dbcfgs['offline_mode'] == 'Y':
+                run_cmd('yum install -y --disablerepo=\* --enablerepo=traflocal %s' % pkg)
+            else:
+                run_cmd('yum install -y %s' % pkg)
+
+    # remove temp repo file
+    if dbcfgs['offline_mode'] == 'Y':
+        os.remove(REPO_FILE)
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()
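
The online branch above derives the EPEL pdsh package URL from the kernel
release string; a worked example of that parsing (the release value is
illustrative):

    import re

    release = '2.6.32-573.el6.x86_64'  # a typical platform.release() value
    releasever, arch = re.search(r'el(\d).(\w+)', release).groups()
    print releasever, arch  # prints: 6 x86_64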

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_discover.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_discover.py b/install/python-installer/traf_discover.py
new file mode 100755
index 0000000..d0c9936
--- /dev/null
+++ b/install/python-installer/traf_discover.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with sudo user ###
+
+import re
+import json
+import sys
+import platform
+from glob import glob
+from common import cmd_output, err, Version, ParseXML
+
+PREFIX = 'get_'
+NA = 'N/A' # not available
+NS = 'N/S' # not supported
+OK = 'OK'
+
+def deco(func):
+    def wrapper(self):
+        if PREFIX in func.__name__:
+            name = func.__name__.replace(PREFIX, '')
+            return name, func(self)
+        else:
+            return
+    return wrapper
+
+
+class Discover(object):
+    """ discover functions, to add a new discover function,
+        simply add a new def with name get_xx and decorated
+        by 'deco', then return result in string format:
+
+        @deco
+        def get_xx(self):
+            # do something
+            return result
+    """
+
+    def __init__(self, dbcfgs):
+        self.CPUINFO = cmd_output('cat /proc/cpuinfo')
+        self.MEMINFO = cmd_output('cat /proc/meminfo')
+        self.SYSCTLINFO = cmd_output('sysctl -a')
+        self.version = Version()
+        self.dbcfgs = dbcfgs
+
+    def _parse_string(self, info, string):
+        try:
+            info = info.split('\n')
+            string_line = [line for line in info if string in line][0]
+        except IndexError:
+            err('Cannot get %s info' % string)
+
+        return string_line
+
+    def _get_cpu_info(self, string):
+        return self._parse_string(self.CPUINFO, string).split(':')[1].strip()
+
+    def _get_mem_info(self, string):
+        return self._parse_string(self.MEMINFO, string).split(':')[1].split()[0]
+
+    def _get_sysctl_info(self, string):
+        return self._parse_string(self.SYSCTLINFO, string).split('=')[1].strip()
+
+    @deco
+    def get_linux(self):
+        """ get linux version """
+        os_dist, os_ver = platform.dist()[:2]
+        if os_dist not in self.version.get_version('linux'):
+            return NA
+        else:
+            if not os_ver.split('.')[0] in self.version.get_version(os_dist):
+                return NA
+        return '%s-%s' % (os_dist, os_ver)
+
+    @deco
+    def get_firewall_status(self):
+        """ get firewall running status """
+        iptables_stat = cmd_output('iptables -nL|grep -vE "(Chain|target)"').strip()
+        if iptables_stat:
+            return 'Running'
+        else:
+            return 'Stopped'
+
+    @deco
+    def get_pidmax(self):
+        """ get kernel pid max setting """
+        return self._get_sysctl_info('kernel.pid_max')
+
+    @deco
+    def get_default_java(self):
+        """ get default java version """
+        jdk_path = glob('/usr/java/*') + \
+                   glob('/usr/jdk64/*') + \
+                   glob('/usr/lib/jvm/java-*-openjdk.x86_64')
+
+        jdk_list = {} # {jdk_version: jdk_path}
+        for path in jdk_path:
+            jdk_ver = cmd_output('%s/bin/javac -version' % path)
+
+            try:
+                main_ver, sub_ver = re.search(r'(\d\.\d\.\d)_(\d+)', jdk_ver).groups()
+                # don't support JDK version less than 1.7.0_65
+                if main_ver == '1.7.0' and int(sub_ver) < 65:
+                    continue
+                jdk_list[main_ver] = path
+            except AttributeError:
+                continue
+
+        if not jdk_list:
+            return NA
+        else:
+            # use JDK1.8 first
+            if jdk_list.has_key('1.8.0'):
+                return jdk_list['1.8.0']
+            elif jdk_list.has_key('1.7.0'):
+                return jdk_list['1.7.0']
+
+    @deco
+    def get_hive(self):
+        """ get Hive status """
+        hive_stat = cmd_output('which hive')
+        if 'no hive' in hive_stat:
+            return NA
+        else:
+            return OK
+
+    @deco
+    def get_secure_hadoop(self):
+        if self.dbcfgs.has_key('hadoop_home'): # apache distro
+            CORE_SITE_XML = '%s/etc/hadoop/core-site.xml' % self.dbcfgs['hadoop_home']
+        else:
+            CORE_SITE_XML = '/etc/hadoop/conf/core-site.xml'
+        p = ParseXML(CORE_SITE_XML)
+        return p.get_property('hadoop.security.authentication')
+
+    @deco
+    def get_hbase(self):
+        """ get HBase version """
+        if self.dbcfgs.has_key('hbase_home'): # apache distro
+            hbase_home = self.dbcfgs['hbase_home']
+            hbase_ver = cmd_output('%s/bin/hbase version | head -n1' % hbase_home)
+        else:
+            hbase_ver = cmd_output('hbase version | head -n1')
+
+        support_hbase_ver = self.version.get_version('hbase')
+        try:
+            hbase_ver = re.search(r'HBase (\d\.\d)', hbase_ver).groups()[0]
+        except AttributeError:
+            return NA
+        if hbase_ver not in support_hbase_ver:
+            return NS
+        return hbase_ver
+
+    @deco
+    def get_cpu_model(self):
+        """ get CPU model """
+        return self._get_cpu_info('model name')
+
+    @deco
+    def get_cpu_cores(self):
+        """ get CPU cores """
+        return self.CPUINFO.count('processor')
+
+    @deco
+    def get_arch(self):
+        """ get CPU architecture """
+        arch = platform.processor()
+        if not arch:
+            arch = 'Unknown'
+        return arch
+
+    @deco
+    def get_mem_total(self):
+        """ get total memory size """
+        mem = self._get_mem_info('MemTotal')
+        memsize = mem.split()[0]
+
+        return "%0.1f GB" % round(float(memsize) / (1024 * 1024), 2)
+
+    @deco
+    def get_mem_free(self):
+        """ get current free memory size """
+        free = self._get_mem_info('MemFree')
+        buffers = self._get_mem_info('Buffers')
+        cached = self._get_mem_info('Cached')
+        memfree = float(free) + float(buffers) + float(cached)
+
+        return "%0.1f GB" % round(memfree / (1024 * 1024), 2)
+
+    @deco
+    def get_ext_interface(self):
+        """ get external network interface """
+        return cmd_output('netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'').strip()
+
+    @deco
+    def get_rootdisk_free(self):
+        """ get root disk space left """
+        space = cmd_output('df -h|grep "\/$" | awk \'{print $4}\'')
+        return space.strip()
+
+    @deco
+    def get_python_ver(self):
+        """ get python version """
+        return platform.python_version()
+
+    @deco
+    def get_traf_status(self):
+        """ get trafodion running status """
+        mon_process = cmd_output('ps -ef|grep -v grep|grep -c "monitor COLD"')
+        if int(mon_process) > 0:
+            return 'Running'
+        else:
+            return 'Stopped'
+
+def run():
+    try:
+        dbcfgs_json = sys.argv[1]
+    except IndexError:
+        err('No db config found')
+    dbcfgs = json.loads(dbcfgs_json)
+    discover = Discover(dbcfgs)
+    methods = [m for m in dir(discover) if m.startswith(PREFIX)]
+    result = {}
+    for method in methods:
+        key, value = getattr(discover, method)() # call method
+        result[key] = value
+
+    print json.dumps(result)
+
+
+# main
+if __name__ == '__main__':
+    run()
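
The PREFIX/deco pattern above is the extension point described in the class
docstring: every get_* method is wrapped to return a (name, value) pair, and
run() collects them into a dict by introspection. A minimal self-contained
sketch of the same mechanism:

    PREFIX = 'get_'

    def deco(func):
        def wrapper(self):
            name = func.__name__.replace(PREFIX, '')
            return name, func(self)
        return wrapper

    class Demo(object):
        @deco
        def get_answer(self):
            return 42

    demo = Demo()
    print dict(getattr(demo, m)() for m in dir(demo) if m.startswith(PREFIX))
    # prints: {'answer': 42}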

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_kerberos.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_kerberos.py b/install/python-installer/traf_kerberos.py
new file mode 100755
index 0000000..f4391a0
--- /dev/null
+++ b/install/python-installer/traf_kerberos.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with sudo user ###
+
+import sys
+import re
+import json
+import socket
+from common import run_cmd, cmd_output, err
+
+def run():
+    """ setup Kerberos security """
+    dbcfgs = json.loads(dbcfgs_json)
+
+    distro = dbcfgs['distro']
+    admin_principal = dbcfgs['admin_principal']
+    admin_passwd = dbcfgs['kdcadmin_pwd']
+    kdc_server = dbcfgs['kdc_server']
+    # maxlife = dbcfgs['max_lifetime']
+    # max_renewlife = dbcfgs['max_renew_lifetime']
+    maxlife = '24hours'
+    max_renewlife = '7days'
+    kadmin_cmd = 'kadmin -p %s -w %s -s %s -q' % (admin_principal, admin_passwd, kdc_server)
+
+    host_name = socket.getfqdn()
+    traf_user = dbcfgs['traf_user']
+    hdfs_user = 'hdfs'
+    hbase_user = 'hbase'
+    realm = re.match('.*@(.*)', admin_principal).groups()[0]
+    traf_keytab_dir = '/etc/%s/keytab' % traf_user
+    traf_keytab = '%s/%s.keytab' % (traf_keytab_dir, traf_user)
+    traf_principal = '%s/%s@%s' % (traf_user, host_name, realm)
+    hdfs_principal = '%s/%s@%s' % (hdfs_user, host_name, realm)
+    hbase_principal = '%s/%s@%s' % (hbase_user, host_name, realm)
+
+    ### setting start ###
+    print 'Checking KDC server connection'
+    run_cmd('%s listprincs' % kadmin_cmd)
+
+    # create principals and keytabs for trafodion user
+    principal_exists = cmd_output('%s listprincs | grep -c %s' % (kadmin_cmd, traf_principal))
+    if int(principal_exists) == 0: # not exist
+        run_cmd('%s \'addprinc -randkey %s\'' % (kadmin_cmd, traf_principal))
+        # Adjust principal's maxlife and maxrenewlife
+        run_cmd('%s \'modprinc -maxlife %s -maxrenewlife %s\' %s >/dev/null 2>&1' % (kadmin_cmd, maxlife, max_renewlife, traf_principal))
+
+    run_cmd('mkdir -p %s' % traf_keytab_dir)
+
+    # TODO: need skip add keytab if exist?
+    print 'Create keytab file for trafodion user'
+    run_cmd('%s \'ktadd -k %s %s\'' % (kadmin_cmd, traf_keytab, traf_principal))
+    run_cmd('chown %s %s' % (traf_user, traf_keytab))
+    run_cmd('chmod 400 %s' % traf_keytab)
+
+    # create principals for hdfs/hbase user
+    print 'Create principals for hdfs/hbase user'
+    if 'CDH' in distro:
+        hdfs_keytab = cmd_output('find /var/run/cloudera-scm-agent/process/ -name hdfs.keytab | head -n 1')
+        hbase_keytab = cmd_output('find /var/run/cloudera-scm-agent/process/ -name hbase.keytab | head -n 1')
+    elif 'HDP' in distro:
+        hdfs_keytab = '/etc/security/keytabs/hdfs.headless.keytab'
+        hbase_keytab = '/etc/security/keytabs/hbase.service.keytab'
+
+    run_cmd('sudo -u %s kinit -kt %s %s' % (hdfs_user, hdfs_keytab, hdfs_principal))
+    run_cmd('sudo -u %s kinit -kt %s %s' % (hbase_user, hbase_keytab, hbase_principal))
+
+    print 'Done creating principals and keytabs'
+
+    kinit_bashrc = """
+
+# ---------------------------------------------------------------
+# if needed obtain and cache the Kerberos ticket-granting ticket
+# start automatic ticket renewal process
+# ---------------------------------------------------------------
+klist -s >/dev/null 2>&1
+if [[ $? -eq 1 ]]; then
+    kinit -kt %s %s >/dev/null 2>&1
+fi
+
+# ---------------------------------------------------------------
+# Start trafodion kerberos ticket manager process
+# ---------------------------------------------------------------
+$MY_SQROOT/sql/scripts/krb5service start >/dev/null 2>&1
+""" % (traf_keytab, traf_principal)
+
+    traf_bashrc = '/home/%s/.bashrc' % traf_user
+    with open(traf_bashrc, 'a') as f:
+        f.write(kinit_bashrc)
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()
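
For reference, the principal and keytab names the script derives, shown with
hypothetical host and realm values:

    traf_user = 'trafodion'
    host_name = 'node1.example.com'  # socket.getfqdn()
    realm = 'EXAMPLE.COM'            # parsed from admin_principal
    print '%s/%s@%s' % (traf_user, host_name, realm)
    # trafodion/node1.example.com@EXAMPLE.COM
    print '/etc/%s/keytab/%s.keytab' % (traf_user, traf_user)
    # /etc/trafodion/keytab/trafodion.keytab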

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_ldap.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_ldap.py b/install/python-installer/traf_ldap.py
new file mode 100755
index 0000000..8cd4b7f
--- /dev/null
+++ b/install/python-installer/traf_ldap.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with trafodion user ###
+
+import os
+import sys
+import json
+from common import run_cmd, mod_file, err, TMP_DIR
+
+def run():
+    """ setup LDAP security """
+    dbcfgs = json.loads(dbcfgs_json)
+
+    DB_ROOT_USER = dbcfgs['db_root_user']
+    SQ_ROOT = os.environ['MY_SQROOT']
+    SQENV_FILE = SQ_ROOT + '/sqenvcom.sh'
+    TRAF_AUTH_CONFIG = '%s/sql/scripts/.traf_authentication_config' % SQ_ROOT
+    TRAF_AUTH_TEMPLATE = '%s/traf_authentication_conf.template' % TMP_DIR
+
+    # set traf_authentication_config file
+    change_items = {
+        '{{ ldap_hosts }}': dbcfgs['ldap_hosts'],
+        '{{ ldap_port }}': dbcfgs['ldap_port'],
+        '{{ ldap_identifiers }}': dbcfgs['ldap_identifiers'],
+        '{{ ldap_encrypt }}': dbcfgs['ldap_encrypt'],
+        '{{ ldap_certpath }}': dbcfgs['ldap_certpath'],
+        '{{ ldap_user }}': dbcfgs['ldap_user'],
+        '{{ ldap_pwd }}': dbcfgs['ldap_pwd']
+    }
+
+    print 'Modify authentication config file'
+    run_cmd('cp %s %s' % (TRAF_AUTH_TEMPLATE, TRAF_AUTH_CONFIG))
+    mod_file(TRAF_AUTH_CONFIG, change_items)
+
+
+    print 'Check LDAP Configuration file for errors'
+    run_cmd('ldapconfigcheck -file %s' % TRAF_AUTH_CONFIG)
+
+    print 'Verify that LDAP user %s exists' % DB_ROOT_USER
+    run_cmd('ldapcheck --verbose --username=%s' % DB_ROOT_USER)
+    #if not 'Authentication successful' in ldapcheck_result:
+    #    err('Failed to access LDAP server with user %s' % DB_ROOT_USER)
+
+    print 'Modify sqenvcom.sh to turn on authentication'
+    mod_file(SQENV_FILE, {'TRAFODION_ENABLE_AUTHENTICATION=NO':'TRAFODION_ENABLE_AUTHENTICATION=YES'})
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()
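
mod_file() is imported from common.py, which is not part of this diff; a
plausible sketch of the substitution it performs on the template (the helper
below is a hypothetical stand-in, not the actual implementation):

    def mod_file_sketch(path, change_items):
        """ replace each '{{ placeholder }}' key with its configured value """
        with open(path) as f:
            content = f.read()
        for old, new in change_items.items():
            content = content.replace(old, new)
        with open(path, 'w') as f:
            f.write(content)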

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_package.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_package.py b/install/python-installer/traf_package.py
new file mode 100755
index 0000000..da148c5
--- /dev/null
+++ b/install/python-installer/traf_package.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+## This script should be run on all nodes with trafodion user ##
+
+import sys
+import json
+from common import run_cmd, err
+
+def run():
+    dbcfgs = json.loads(dbcfgs_json)
+
+    TRAF_DIR = '%s-%s' % (dbcfgs['traf_basename'], dbcfgs['traf_version'])
+
+    # untar traf package
+    TRAF_PACKAGE_FILE = '/tmp/' + dbcfgs['traf_package'].split('/')[-1]
+    run_cmd('mkdir -p %s' % TRAF_DIR)
+    run_cmd('tar xf %s -C %s' % (TRAF_PACKAGE_FILE, TRAF_DIR))
+
+    print 'Trafodion package uncompressed successfully!'
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_setup.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_setup.py b/install/python-installer/traf_setup.py
new file mode 100755
index 0000000..91d0faa
--- /dev/null
+++ b/install/python-installer/traf_setup.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with sudo user ###
+
+import os
+import sys
+import re
+import json
+from common import err, cmd_output, run_cmd
+
+def run():
+    dbcfgs = json.loads(dbcfgs_json)
+
+    TRAF_HOME = cmd_output('cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
+    TRAF_USER = dbcfgs['traf_user']
+    SQ_ROOT = '%s/%s/%s-%s' % (TRAF_HOME, TRAF_USER, dbcfgs['traf_basename'], dbcfgs['traf_version'])
+
+    TRAF_VER = dbcfgs['traf_version']
+    DISTRO = dbcfgs['distro']
+    TRAF_LIB_PATH = SQ_ROOT + '/export/lib'
+    SCRATCH_LOCS = dbcfgs['scratch_locs'].split(',')
+
+    SUDOER_FILE = '/etc/sudoers.d/trafodion'
+    SUDOER_CFG = """
+## Allow trafodion id to run commands needed for backup and restore
+%%%s ALL =(hbase) NOPASSWD: /usr/bin/hbase
+""" % TRAF_USER
+
+    ### kernel settings ###
+    run_cmd('sysctl -w kernel.pid_max=65535 2>&1 > /dev/null')
+    run_cmd('echo "kernel.pid_max=65535" >> /etc/sysctl.conf')
+
+    ### create and set permission for scratch file dir ###
+    for loc in SCRATCH_LOCS:
+        # don't set permission for HOME folder
+        if not os.path.exists(loc):
+            run_cmd('mkdir -p %s' % loc)
+        if TRAF_HOME not in loc:
+            run_cmd('chmod 777 %s' % loc)
+
+    ### copy jar files ###
+    hbase_lib_path = '/usr/lib/hbase/lib'
+    if 'CDH' in DISTRO:
+        parcel_lib = '/opt/cloudera/parcels/CDH/lib/hbase/lib'
+        if os.path.exists(parcel_lib): hbase_lib_path = parcel_lib
+    elif 'HDP' in DISTRO:
+        hbase_lib_path = '/usr/hdp/current/hbase-regionserver/lib'
+    elif 'APACHE' in DISTRO:
+        hbase_home = dbcfgs['hbase_home']
+        hbase_lib_path = hbase_home + '/lib'
+        # for apache distro, get hbase version from cmdline
+        hbase_ver = cmd_output('%s/bin/hbase version | head -n1' % hbase_home)
+        hbase_ver = re.search(r'HBase (\d\.\d)', hbase_ver).groups()[0]
+        DISTRO += hbase_ver
+
+    distro, v1, v2 = re.search(r'(\w+)-*(\d)\.(\d)', DISTRO).groups()
+    if distro == 'CDH':
+        if v2 == '6': v2 = '5'
+        if v2 == '8': v2 = '7'
+    elif distro == 'HDP':
+        if v2 == '4': v2 = '3'
+
+    hbase_trx_jar = 'hbase-trx-%s%s_%s-%s.jar' % (distro.lower(), v1, v2, TRAF_VER)
+    traf_hbase_trx_path = '%s/%s' % (TRAF_LIB_PATH, hbase_trx_jar)
+    hbase_trx_path = '%s/%s' % (hbase_lib_path, hbase_trx_jar)
+    if not os.path.exists(traf_hbase_trx_path):
+        err('Cannot find HBase trx jar \'%s\' for your Hadoop distribution' % hbase_trx_jar)
+
+    # upgrade mode, check if existing trx jar doesn't match the new trx jar file
+    if dbcfgs.has_key('upgrade') and dbcfgs['upgrade'].upper() == 'Y':
+        if not os.path.exists(hbase_trx_path):
+            err('The trx jar \'%s\' doesn\'t exist in hbase lib path, cannot do upgrade, please do regular install' % hbase_trx_jar)
+    else:
+        # remove old trx and trafodion-utility jar files
+        run_cmd('rm -rf %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)
+
+        # copy new ones
+        run_cmd('cp %s %s' % (traf_hbase_trx_path, hbase_lib_path))
+        run_cmd('cp %s/trafodion-utility-* %s' % (TRAF_LIB_PATH, hbase_lib_path))
+
+    # set permission
+    run_cmd('chmod +r %s/{hbase-trx-*,trafodion-utility-*}' % hbase_lib_path)
+
+    if dbcfgs['dcs_ha'] == 'Y':
+        # set trafodion sudoer file for specific cmds
+        SUDOER_CFG += """
+## Trafodion Floating IP commands
+Cmnd_Alias IP = /sbin/ip
+Cmnd_Alias ARP = /sbin/arping
+
+## Allow Trafodion id to run commands needed to configure floating IP
+%%%s ALL = NOPASSWD: IP, ARP
+""" % TRAF_USER
+
+    ### write trafodion sudoer file ###
+    with open(SUDOER_FILE, 'w') as f:
+        f.write(SUDOER_CFG)
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()
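
A worked example of the trx jar-name mapping above (version strings are
illustrative): DISTRO 'CDH5.5' with Trafodion version '2.0.0' resolves to
'hbase-trx-cdh5_5-2.0.0.jar':

    import re

    DISTRO, TRAF_VER = 'CDH5.5', '2.0.0'
    distro, v1, v2 = re.search(r'(\w+)-*(\d)\.(\d)', DISTRO).groups()
    if distro == 'CDH':
        if v2 == '6': v2 = '5'
        if v2 == '8': v2 = '7'
    elif distro == 'HDP':
        if v2 == '4': v2 = '3'
    print 'hbase-trx-%s%s_%s-%s.jar' % (distro.lower(), v1, v2, TRAF_VER)
    # prints: hbase-trx-cdh5_5-2.0.0.jar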

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_sqconfig.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_sqconfig.py b/install/python-installer/traf_sqconfig.py
new file mode 100755
index 0000000..b329155
--- /dev/null
+++ b/install/python-installer/traf_sqconfig.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on first node with trafodion user ###
+
+import os
+import sys
+import json
+from common import run_cmd, err
+
+def run():
+    dbcfgs = json.loads(dbcfgs_json)
+
+    nodes = dbcfgs['node_list'].split(',')
+    scratch_locs = dbcfgs['scratch_locs'].split(',')
+
+    # this script is run by the trafodion user, so get MY_SQROOT from env
+    sq_root = os.environ['MY_SQROOT']
+    if sq_root == '': err('MY_SQROOT var is empty')
+    sqconfig_file = sq_root + '/sql/scripts/sqconfig'
+
+    core, processor = run_cmd("lscpu|grep -E '(^CPU\(s\)|^Socket\(s\))'|awk '{print $2}'").split('\n')[:2]
+    core = int(core)-1 if int(core) <= 256 else 255
+
+    lines = ['begin node\n']
+    for node_id, node in enumerate(nodes):
+        line = 'node-id=%s;node-name=%s;cores=0-%d;processors=%s;roles=connection,aggregation,storage\n' % (node_id, node, core, processor)
+        lines.append(line)
+
+    lines.append('end node\n')
+    lines.append('\n')
+    lines.append('begin overflow\n')
+
+    for scratch_loc in scratch_locs:
+        line = 'hdd %s\n' % scratch_loc
+        lines.append(line)
+
+    lines.append('end overflow\n')
+
+    with open(sqconfig_file, 'w') as f:
+        f.writelines(lines)
+
+    print 'sqconfig generated successfully!'
+
+    run_cmd('sqgen')
+
+    print 'sqgen ran successfully!'
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()
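
For illustration, the node section this generates for a hypothetical two-node
cluster with 16 logical CPUs and 2 sockets per node:

    nodes = ['node1', 'node2']  # hypothetical node_list
    core, processor = 15, 2     # 16 CPUs -> cores=0-15; 2 sockets
    for node_id, node in enumerate(nodes):
        print 'node-id=%s;node-name=%s;cores=0-%d;processors=%s;' \
              'roles=connection,aggregation,storage' % (node_id, node, core, processor)
    # node-id=0;node-name=node1;cores=0-15;processors=2;roles=connection,aggregation,storage
    # node-id=1;node-name=node2;cores=0-15;processors=2;roles=connection,aggregation,storage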

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_start.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_start.py b/install/python-installer/traf_start.py
new file mode 100755
index 0000000..7546a7a
--- /dev/null
+++ b/install/python-installer/traf_start.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on first node with trafodion user ###
+
+import sys
+import json
+from common import cmd_output, run_cmd, err
+
+def run():
+    """ start trafodion instance """
+    dbcfgs = json.loads(dbcfgs_json)
+
+    print 'Starting trafodion'
+    run_cmd('sqstart')
+
+    tmp_file = '/tmp/initialize.out'
+    if dbcfgs.has_key('upgrade') and dbcfgs['upgrade'].upper() == 'Y':
+        print 'Initialize trafodion upgrade'
+        run_cmd('echo "initialize trafodion, upgrade;" | sqlci > %s' % tmp_file)
+        init_output = cmd_output('cat %s' % tmp_file)
+        if 'ERROR' in init_output:
+            err('Failed to initialize trafodion upgrade:\n %s' % init_output)
+    else:
+        print 'Initialize trafodion'
+        run_cmd('echo "initialize trafodion;" | sqlci > %s' % tmp_file)
+        init_output = cmd_output('cat %s' % tmp_file)
+        # skip error 1392
+        # ERROR[1392] Trafodion is already initialized on this system. No action is needed.
+        if 'ERROR' in init_output and not '1392' in init_output:
+            err('Failed to initialize trafodion:\n %s' % init_output)
+
+    if dbcfgs['ldap_security'] == 'Y':
+        run_cmd('echo "initialize authorization; alter user DB__ROOT set external name \"%s\";" | sqlci > %s' % (dbcfgs['db_root_user'], tmp_file))
+        if dbcfgs.has_key('db_admin_user'):
+            run_cmd('echo "alter user DB__ADMIN set external name \"%s\";" | sqlci >> %s' % (dbcfgs['db_admin_user'], tmp_file))
+
+        secure_output = cmd_output('cat %s' % tmp_file)
+        if 'ERROR' in secure_output:
+            err('Failed to setup security for trafodion:\n %s' % secure_output)
+
+    run_cmd('rm %s' % tmp_file)
+    print 'Started trafodion successfully.'
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/traf_user.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_user.py b/install/python-installer/traf_user.py
new file mode 100755
index 0000000..554ba2d
--- /dev/null
+++ b/install/python-installer/traf_user.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with sudo user ###
+
+import os
+import sys
+import json
+from common import run_cmd, append_file, mod_file, cmd_output, run_cmd_as_user, err, TMP_DIR
+
+def run():
+    """ create trafodion user, bashrc, setup passwordless SSH """
+    dbcfgs = json.loads(dbcfgs_json)
+
+    DISTRO = dbcfgs['distro']
+    if 'CDH' in DISTRO:
+        hadoop_type = 'cloudera'
+    elif 'HDP' in DISTRO:
+        hadoop_type = 'hortonworks'
+    elif 'APACHE' in DISTRO:
+        hadoop_type = 'apache'
+
+    TRAF_USER = dbcfgs['traf_user']
+    TRAF_PWD = dbcfgs['traf_pwd']
+    TRAF_GROUP = TRAF_USER
+    TRAF_HOME = cmd_output('cat /etc/default/useradd |grep HOME |cut -d "=" -f 2').strip()
+    TRAF_USER_DIR = '%s/%s' % (TRAF_HOME, TRAF_USER)
+    SQ_ROOT = '%s/%s-%s' % (TRAF_USER_DIR, dbcfgs['traf_basename'], dbcfgs['traf_version'])
+
+    KEY_FILE = '/tmp/id_rsa'
+    AUTH_KEY_FILE = '%s/.ssh/authorized_keys' % TRAF_USER_DIR
+    SSH_CFG_FILE = '%s/.ssh/config' % TRAF_USER_DIR
+    BASHRC_TEMPLATE = '%s/bashrc.template' % TMP_DIR
+    BASHRC_FILE = '%s/.bashrc' % TRAF_USER_DIR
+    ULIMITS_FILE = '/etc/security/limits.d/%s.conf' % TRAF_USER
+    HSPERFDATA_FILE = '/tmp/hsperfdata_trafodion'
+
+    # create trafodion user and group
+    if not cmd_output('getent group %s' % TRAF_GROUP):
+        run_cmd('groupadd %s > /dev/null 2>&1' % TRAF_GROUP)
+
+    if not cmd_output('getent passwd %s' % TRAF_USER):
+        run_cmd('useradd --shell /bin/bash -m %s -g %s --password "$(openssl passwd %s)"' % (TRAF_USER, TRAF_GROUP, TRAF_PWD))
+    elif not os.path.exists(TRAF_USER_DIR):
+        run_cmd('mkdir -p %s' % TRAF_USER_DIR)
+        run_cmd('chmod 700 %s' % TRAF_USER_DIR)
+
+    # set ssh key
+    run_cmd_as_user(TRAF_USER, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
+    # the key is generated by the copy_files.py script running on the installer node
+    run_cmd('cp %s{,.pub} %s/.ssh/' % (KEY_FILE, TRAF_USER_DIR))
+
+    run_cmd_as_user(TRAF_USER, 'cat ~/.ssh/id_rsa.pub > %s' % AUTH_KEY_FILE)
+    run_cmd('chmod 644 %s' % AUTH_KEY_FILE)
+
+    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
+    with open(SSH_CFG_FILE, 'w') as f:
+        f.write(ssh_cfg)
+    run_cmd('chmod 600 %s' % SSH_CFG_FILE)
+
+    run_cmd('chown -R %s:%s %s/.ssh/' % (TRAF_USER, TRAF_GROUP, TRAF_USER_DIR))
+
+    # set bashrc
+    nodes = dbcfgs['node_list'].split(',')
+    change_items = {
+        '{{ java_home }}': dbcfgs['java_home'],
+        '{{ sq_home }}': SQ_ROOT,
+        '{{ hadoop_type }}': hadoop_type,
+        '{{ node_list }}': ' '.join(nodes),
+        '{{ node_count }}':str(len(nodes)),
+        '{{ my_nodes }}': ' -w ' + ' -w '.join(nodes)
+    }
+
+    mod_file(BASHRC_TEMPLATE, change_items)
+
+    if 'APACHE' in DISTRO:
+        bashrc_content = """
+export HADOOP_PREFIX=%s
+export HBASE_HOME=%s
+export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
+        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'])
+        append_file(BASHRC_TEMPLATE, bashrc_content, position='HADOOP_TYPE')
+
+    # back up bashrc if it exists
+    if os.path.exists(BASHRC_FILE):
+        run_cmd('cp %s %s.bak' % ((BASHRC_FILE,) *2))
+
+    # copy bashrc to trafodion's home
+    run_cmd('cp %s %s' % (BASHRC_TEMPLATE, BASHRC_FILE))
+    run_cmd('chown -R %s:%s %s*' % (TRAF_USER, TRAF_GROUP, BASHRC_FILE))
+
+    # set ulimits for trafodion user
+    ulimits_config = '''
+# Trafodion settings
+%s   soft   core unlimited
+%s   hard   core unlimited
+%s   soft   memlock unlimited
+%s   hard   memlock unlimited
+%s   soft   nofile 32768
+%s   hard   nofile 65536
+%s   soft   nproc 100000
+%s   hard   nproc 100000
+%s   soft nofile 8192
+%s   hard nofile 65535
+hbase soft nofile 8192
+''' % ((TRAF_USER,) * 10)
+
+    with open(ULIMITS_FILE, 'w') as f:
+        f.write(ulimits_config)
+
+    # change permission for hsperfdata
+    if os.path.exists(HSPERFDATA_FILE):
+        run_cmd('chown -R %s:%s %s' % (TRAF_USER, TRAF_GROUP, HSPERFDATA_FILE))
+
+    # clean up unused key file at the last step
+    run_cmd('rm -rf %s{,.pub}' % KEY_FILE)
+
+    print 'Set up trafodion user successfully!'
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/version.json
----------------------------------------------------------------------
diff --git a/install/python-installer/version.json b/install/python-installer/version.json
new file mode 100644
index 0000000..dfb20f5
--- /dev/null
+++ b/install/python-installer/version.json
@@ -0,0 +1,10 @@
+{
+    "linux":  ["centos", "redhat"],
+    "hadoop": ["cloudera", "hortonworks", "apache"],
+    "java":   ["1.7"],
+    "centos": ["6"],
+    "redhat": ["6"],
+    "cdh":    ["5.4", "5.5", "5.6"],
+    "hdp":    ["2.3", "2.4"],
+    "hbase":  ["1.0", "1.1"]
+}
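
common.py (not included in this diff) exposes the Version helper that
traf_check.py and traf_discover.py call as Version().get_version('java');
a plausible sketch of how it reads this file (hypothetical, not the actual
implementation):

    import json

    class Version(object):
        def __init__(self, cfg_file='version.json'):
            with open(cfg_file) as f:
                self.versions = json.load(f)

        def get_version(self, component):
            return self.versions[component.lower()]

    print Version().get_version('hbase')  # prints: [u'1.0', u'1.1']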

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/wrapper.py
----------------------------------------------------------------------
diff --git a/install/python-installer/wrapper.py b/install/python-installer/wrapper.py
new file mode 100644
index 0000000..f2f0ac8
--- /dev/null
+++ b/install/python-installer/wrapper.py
@@ -0,0 +1,301 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+import os
+import time
+import json
+import subprocess
+from glob import glob
+from threading import Thread
+from common import err_m, run_cmd, time_elapse, get_logger, ParseJson, Remote, INSTALLER_LOC, TMP_DIR, SCRCFG_FILE
+
+
+class RemoteRun(Remote):
+    """ run commands or scripts remotely using ssh """
+
+    def __init__(self, host, logger, user='', pwd='', quiet=False):
+        super(RemoteRun, self).__init__(host, user, pwd)
+
+        self.quiet = quiet # no output
+        self.logger = logger
+        # create tmp folder
+        self.__run_sshcmd('mkdir -p %s' % TMP_DIR)
+
+        # copy all needed files to remote host
+        all_files = glob(INSTALLER_LOC + '/*.py') + \
+                    glob(INSTALLER_LOC + '/*.json') + \
+                    glob(INSTALLER_LOC + '/*.template')
+
+        self.copy(all_files, remote_folder=TMP_DIR)
+
+        # set permission
+        self.__run_sshcmd('chmod a+rx %s/*.py' % TMP_DIR)
+
+    def __del__(self):
+        # clean up
+        self.__run_ssh('sudo rm -rf %s' % TMP_DIR)
+
+    def run_script(self, script, run_user, json_string, verbose=False):
+        """ @param run_user: run the script with this user """
+
+        if run_user:
+            # format string in order to run with 'sudo su $user -c $cmd'
+            json_string = json_string.replace('"', '\\\\\\"').replace(' ', '').replace('{', '\\{').replace('$', '\\\\\\$')
+            # this command only works with shell=True
+            script_cmd = '"sudo su - %s -c \'%s/%s %s\'"' % (run_user, TMP_DIR, script, json_string)
+            self.__run_ssh(script_cmd, verbose=verbose, shell=True)
+        else:
+            script_cmd = 'sudo %s/%s \'%s\'' % (TMP_DIR, script, json_string)
+            self.__run_ssh(script_cmd, verbose=verbose)
+
+        format1 = 'Host [%s]: Script [%s]: %s' % (self.host, script, self.stdout)
+        format2 = 'Host [%s]: Script [%s]' % (self.host, script)
+
+        self.logger.info(format1)
+
+        if self.rc == 0:
+            if not self.quiet: state_ok(format2)
+            self.logger.info(format2 + ' ran successfully!')
+        else:
+            if not self.quiet: state_fail(format2)
+            msg = 'Host [%s]: Failed to run \'%s\'' % (self.host, script)
+            if self.stderr:
+                msg += ': ' + self.stderr
+                print '\n ' + self.stderr
+            self.logger.error(msg)
+            exit(1)
+
+    def __run_ssh(self, user_cmd, verbose=False, shell=False):
+        """ @params: user_cmd should be a string """
+        cmd = self._commands('ssh')
+        cmd += ['-tt'] # force tty allocation
+        if self.user:
+            cmd += ['%s@%s' % (self.user, self.host)]
+        else:
+            cmd += [self.host]
+
+        # if shell=True, cmd should be a string not list
+        if shell:
+            cmd = ' '.join(cmd) + ' '
+            cmd += user_cmd
+        else:
+            cmd += user_cmd.split()
+
+        self._execute(cmd, verbose=verbose, shell=shell)
+
+    def __run_sshcmd(self, int_cmd):
+        """ run internal used ssh command """
+
+        self.__run_ssh(int_cmd)
+        if self.rc != 0:
+            msg = 'Host [%s]: Failed to run ssh commands, check SSH password or connectivity' % self.host
+            self.logger.error(msg)
+            err_m(msg)
+
+def state_ok(msg):
+    state(32, ' OK ', msg)
+
+def state_fail(msg):
+    state(31, 'FAIL', msg)
+
+def state_skip(msg):
+    state(33, 'SKIP', msg)
+
+def state(color, result, msg):
+    WIDTH = 80
+    print '\n\33[%dm%s %s [ %s ]\33[0m\n' % (color, msg, (WIDTH - len(msg))*'.', result)
+
+class Status(object):
+    def __init__(self, stat_file, name):
+        self.stat_file = stat_file
+        self.name = name
+
+    def get_status(self):
+        if not os.path.exists(self.stat_file): os.mknod(self.stat_file)
+        with open(self.stat_file, 'r') as f:
+            st = f.readlines()
+        for s in st:
+            if s.split()[0] == self.name: return True
+        return False
+
+    def set_status(self):
+        with open(self.stat_file, 'a+') as f:
+            f.write('%s OK\n' % self.name)
+
+@time_elapse
+def run(dbcfgs, options, mode='install', pwd=''):
+    """ main entry
+        mode: install/discover
+    """
+    STAT_FILE = mode + '.status'
+    LOG_FILE = '%s/logs/%s_%s.log' % (INSTALLER_LOC, mode, time.strftime('%Y%m%d_%H%M'))
+    logger = get_logger(LOG_FILE)
+
+    verbose = True if hasattr(options, 'verbose') and options.verbose else False
+    upgrade = True if hasattr(options, 'upgrade') and options.upgrade else False
+    user = options.user if hasattr(options, 'user') and options.user else ''
+    threshold = options.fork if hasattr(options, 'fork') and options.fork else 10
+
+    script_output = [] # script output array
+    conf = ParseJson(SCRCFG_FILE).load()
+    script_cfgs = conf[mode]
+
+    dbcfgs_json = json.dumps(dbcfgs)
+    hosts = dbcfgs['node_list'].split(',')
+
+    # handle skipped scripts, skip them if no need to run
+    skipped_scripts = []
+    if upgrade:
+        skipped_scripts += ['hadoop_mods', 'apache_mods', 'apache_restart', 'traf_dep', 'traf_kerberos']
+
+    if dbcfgs['secure_hadoop'] == 'N':
+        skipped_scripts += ['traf_kerberos']
+
+    if dbcfgs['traf_start'].upper() == 'N':
+        skipped_scripts += ['traf_start']
+
+    if dbcfgs['ldap_security'].upper() == 'N':
+        skipped_scripts += ['traf_ldap']
+
+    if 'APACHE' in dbcfgs['distro']:
+        skipped_scripts += ['hadoop_mods']
+    else:
+        skipped_scripts += ['apache_mods', 'apache_restart']
+
+
+    # set ssh config file to avoid known-hosts verification on the current installer node
+    SSH_CFG_FILE = os.environ['HOME'] + '/.ssh/config'
+    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
+    with open(SSH_CFG_FILE, 'w') as f:
+        f.write(ssh_cfg)
+    run_cmd('chmod 600 %s' % SSH_CFG_FILE)
+
+    def run_local_script(script, json_string, req_pwd):
+        cmd = '%s/%s \'%s\'' % (INSTALLER_LOC, script, json_string)
+
+        # pass the ssh password to sub scripts which need SSH password
+        if req_pwd: cmd += ' ' + pwd
+
+        if verbose: print cmd
+
+        # stdout on screen
+        p = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
+        stdout, stderr = p.communicate()
+
+        rc = p.returncode
+        if rc != 0:
+            msg = 'Failed to run \'%s\'' % script
+            if stderr:
+                msg += ': ' + stderr
+                print stderr
+            logger.error(msg)
+            state_fail('localhost: Script [%s]' % script)
+            exit(rc)
+        else:
+            state_ok('Script [%s]' % script)
+            logger.info('Script [%s] ran successfully!' % script)
+
+        return stdout
+
+    # run sub scripts
+    try:
+        remote_instances = []
+        if mode == 'discover':
+            remote_instances = [RemoteRun(host, logger, user=user, pwd=pwd, quiet=True) for host in hosts]
+        else:
+            remote_instances = [RemoteRun(host, logger, user=user, pwd=pwd) for host in hosts]
+        first_instance = remote_instances[0]
+        first_rs_instance = None
+        for instance in remote_instances:
+            if instance.host == dbcfgs['first_rsnode']:
+                first_rs_instance = instance
+                break
+        if first_rs_instance is None:
+            err_m('Cannot find the first RegionServer node [%s] in the node list' % dbcfgs['first_rsnode'])
+
+        logger.info(' ***** %s Start *****' % mode)
+        for cfg in script_cfgs:
+            script = cfg['script']
+            node = cfg['node']
+            desc = cfg['desc']
+            # reset per script so values don't leak into the next iteration
+            run_user = ''
+            if cfg.get('run_as_traf') == 'yes':
+                run_user = dbcfgs['traf_user']
+
+            req_pwd = cfg.get('req_pwd') == 'yes'
+
+            status = Status(STAT_FILE, script)
+            if status.get_status():
+                msg = 'Script [%s] has already been executed' % script
+                state_skip(msg)
+                logger.info(msg)
+                continue
+
+            if script.split('.')[0] in skipped_scripts:
+                continue
+            else:
+                print '\nTASK: %s %s' % (desc, (83 - len(desc))*'*')
+
+            #TODO: timeout exit
+            if node == 'local':
+                run_local_script(script, dbcfgs_json, req_pwd)
+            elif node == 'first':
+                first_instance.run_script(script, run_user, dbcfgs_json, verbose=verbose)
+            elif node == 'first_rs':
+                first_rs_instance.run_script(script, run_user, dbcfgs_json, verbose=verbose)
+            elif node == 'all':
+                # run on all nodes, at most 'threshold' of them in parallel at a time
+                parted_remote_instances = [remote_instances[i:i + threshold]
+                                           for i in range(0, len(remote_instances), threshold)]
+
+                for parted_remote_inst in parted_remote_instances:
+                    threads = [Thread(target=r.run_script, args=(script, run_user, dbcfgs_json, verbose)) for r in parted_remote_inst]
+                    for t in threads: t.start()
+                    for t in threads: t.join()
+
+                    if sum([r.rc for r in parted_remote_inst]) != 0:
+                        err_m('Script failed to run on one or more nodes, exiting ...\nCheck log file %s for details.' % LOG_FILE)
+
+                    script_output += [{r.host:r.stdout.strip()} for r in parted_remote_inst]
+
+            else:
+                # should never get here
+                err_m('Invalid configuration for %s' % SCRCFG_FILE)
+
+            status.set_status()
+    except KeyboardInterrupt:
+        err_m('User quit')
+
+    # remove status file if all scripts run successfully
+    os.remove(STAT_FILE)
+
+    return script_output
+
+if __name__ == '__main__':
+    exit(0)
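
The 'node == all' branch above is the heart of the wrapper: chunk the host
list, run one thread per host per chunk, and fail fast if any chunk reports a
non-zero return code. A minimal standalone sketch of that pattern (RemoteRun
is replaced by a hypothetical stub; host names are made up):

    from threading import Thread

    class StubRun(object):
        """ hypothetical stand-in for wrapper.RemoteRun """
        def __init__(self, host):
            self.host = host
            self.rc = 0
            self.stdout = ''
        def run_script(self, script, run_user, cfgs, verbose=False):
            self.stdout = 'ran %s on %s' % (script, self.host)

    hosts = ['n%02d' % i for i in range(1, 26)]  # 25 fake hosts
    threshold = 10                               # max parallel sessions
    instances = [StubRun(h) for h in hosts]

    # same chunking as run() above: at most 'threshold' threads at a time
    for i in range(0, len(instances), threshold):
        chunk = instances[i:i + threshold]
        threads = [Thread(target=r.run_script, args=('traf_check.py', '', '{}')) for r in chunk]
        for t in threads: t.start()
        for t in threads: t.join()
        if sum(r.rc for r in chunk) != 0:
            raise SystemExit('one or more hosts failed')
    print '%d hosts done' % len(instances)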


[4/6] incubator-trafodion git commit: edit .rat-excludes to avoid jenkins error

Posted by sv...@apache.org.
edit .rat-excludes to avoid jenkins error


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/f1c33401
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/f1c33401
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/f1c33401

Branch: refs/heads/master
Commit: f1c3340135d7ed31bf999b9c5f0355e00ecaf281
Parents: a809714
Author: Eason <hf...@gmail.com>
Authored: Wed Nov 2 15:43:45 2016 +0800
Committer: Eason <hf...@gmail.com>
Committed: Wed Nov 2 15:43:45 2016 +0800

----------------------------------------------------------------------
 .rat-excludes | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f1c33401/.rat-excludes
----------------------------------------------------------------------
diff --git a/.rat-excludes b/.rat-excludes
index 5d80607..feca633 100644
--- a/.rat-excludes
+++ b/.rat-excludes
@@ -165,4 +165,8 @@ VariableLengthPKTest.java
 # asciidoc extention file -- MIT license
 google-analytics-postprocessor.rb
 
-
+# installer JSON config files
+mod_cfgs.json
+prompt.json
+script.json
+version.json


[6/6] incubator-trafodion git commit: Merge [TRAFODION-1839] Trafodion Installer Evolution

Posted by sv...@apache.org.
Merge [TRAFODION-1839] Trafodion Installer Evolution


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/7ba7ee3e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/7ba7ee3e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/7ba7ee3e

Branch: refs/heads/master
Commit: 7ba7ee3e54e72601a486aa477405cc75ce4ed813
Parents: d751c96 ed2f03e
Author: Steve Varnau <sv...@apache.org>
Authored: Wed Nov 9 18:53:39 2016 +0000
Committer: Steve Varnau <sv...@apache.org>
Committed: Wed Nov 9 18:53:39 2016 +0000

----------------------------------------------------------------------
 .rat-excludes                                   |   6 +-
 install/python-installer/README.md              |  21 +
 install/python-installer/apache_mods.py         |  72 +++
 install/python-installer/bashrc.template        |  79 +++
 install/python-installer/common.py              | 478 ++++++++++++++
 install/python-installer/copy_files.py          |  64 ++
 install/python-installer/db_config_default      | 120 ++++
 install/python-installer/db_install.py          | 641 +++++++++++++++++++
 install/python-installer/dcs_setup.py           | 114 ++++
 install/python-installer/discovery.py           | 144 +++++
 install/python-installer/hadoop_mods.py         | 203 ++++++
 install/python-installer/hdfs_cmds.py           |  68 ++
 install/python-installer/mod_cfgs.json          |  73 +++
 install/python-installer/prompt.json            | 227 +++++++
 install/python-installer/script.json            |  87 +++
 .../traf_authentication_conf.template           |  71 ++
 install/python-installer/traf_check.py          |  87 +++
 install/python-installer/traf_dep.py            | 110 ++++
 install/python-installer/traf_discover.py       | 253 ++++++++
 install/python-installer/traf_kerberos.py       | 116 ++++
 install/python-installer/traf_ldap.py           |  73 +++
 install/python-installer/traf_package.py        |  47 ++
 install/python-installer/traf_setup.py          | 125 ++++
 install/python-installer/traf_sqconfig.py       |  74 +++
 install/python-installer/traf_start.py          |  70 ++
 install/python-installer/traf_user.py           | 146 +++++
 install/python-installer/version.json           |  10 +
 install/python-installer/wrapper.py             | 301 +++++++++
 28 files changed, 3879 insertions(+), 1 deletion(-)
----------------------------------------------------------------------



[5/6] incubator-trafodion git commit: add enable_ha env in bashrc

Posted by sv...@apache.org.
add enable_ha env in bashrc


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/ed2f03eb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/ed2f03eb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/ed2f03eb

Branch: refs/heads/master
Commit: ed2f03ebdb0e373b205abcba6769dd6455cd6ec3
Parents: f1c3340
Author: Eason <hf...@gmail.com>
Authored: Mon Nov 7 23:17:14 2016 +0800
Committer: Eason <hf...@gmail.com>
Committed: Mon Nov 7 23:17:14 2016 +0800

----------------------------------------------------------------------
 install/python-installer/bashrc.template | 1 +
 install/python-installer/db_install.py   | 2 ++
 install/python-installer/traf_package.py | 1 -
 install/python-installer/traf_user.py    | 3 ++-
 4 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/ed2f03eb/install/python-installer/bashrc.template
----------------------------------------------------------------------
diff --git a/install/python-installer/bashrc.template b/install/python-installer/bashrc.template
index ceed82a..d820285 100644
--- a/install/python-installer/bashrc.template
+++ b/install/python-installer/bashrc.template
@@ -49,6 +49,7 @@ export NODE_LIST="{{ node_list }}"
 export MY_NODES="{{ my_nodes }}"
 export node_count="{{ node_count }}"
 export HADOOP_TYPE="{{ hadoop_type }}"
+export ENABLE_HA="{{ enable_ha }}"
 
 #-------------------------------------------
 # Execute the sqenv.sh script if it exists.

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/ed2f03eb/install/python-installer/db_install.py
----------------------------------------------------------------------
diff --git a/install/python-installer/db_install.py b/install/python-installer/db_install.py
index 6ad676e..73cacaa 100755
--- a/install/python-installer/db_install.py
+++ b/install/python-installer/db_install.py
@@ -494,6 +494,7 @@ def user_input(options, prompt_mode=True, pwd=''):
 
     # DCS HA
     g('dcs_ha')
+    cfgs['enable_ha'] = 'false'
     if cfgs['dcs_ha'].upper() == 'Y':
         g('dcs_floating_ip')
         g('dcs_interface')
@@ -501,6 +502,7 @@ def user_input(options, prompt_mode=True, pwd=''):
         # check dcs backup nodes should exist in node list
         if sorted(list(set((cfgs['dcs_backup_nodes'] + ',' + cfgs['node_list']).split(',')))) != sorted(cfgs['node_list'].split(',')):
             log_err('Invalid DCS backup nodes, please pick up from node list')
+        cfgs['enable_ha'] = 'true'
 
     if need_java_home:
         g('java_home')
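
The one-line subset check above is dense; in isolation it reads like this (a
standalone sketch with made-up node names):

    cfgs = {'node_list': 'n01,n02,n03', 'dcs_backup_nodes': 'n02,n03'}

    # the union of backup nodes and node list must equal the node list,
    # i.e. every backup node must already appear in node_list
    combined = sorted(set((cfgs['dcs_backup_nodes'] + ',' + cfgs['node_list']).split(',')))
    print combined == sorted(cfgs['node_list'].split(','))
    # True; adding 'n04' to dcs_backup_nodes would make it False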

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/ed2f03eb/install/python-installer/traf_package.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_package.py b/install/python-installer/traf_package.py
index d0dbe39..c7cc043 100755
--- a/install/python-installer/traf_package.py
+++ b/install/python-installer/traf_package.py
@@ -36,7 +36,6 @@ def run():
     TRAF_PACKAGE_FILE = '/tmp/' + dbcfgs['traf_package'].split('/')[-1]
     run_cmd('mkdir -p %s' % TRAF_DIR)
     run_cmd('tar xf %s -C %s' % (TRAF_PACKAGE_FILE, TRAF_DIR))
-    run_cmd('rm -rf %s' % TRAF_PACKAGE_FILE)
 
     print 'Trafodion package extracted successfully!'
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/ed2f03eb/install/python-installer/traf_user.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_user.py b/install/python-installer/traf_user.py
index 554ba2d..28e2c81 100755
--- a/install/python-installer/traf_user.py
+++ b/install/python-installer/traf_user.py
@@ -87,7 +87,8 @@ def run():
         '{{ sq_home }}': SQ_ROOT,
         '{{ hadoop_type }}': hadoop_type,
         '{{ node_list }}': ' '.join(nodes),
-        '{{ node_count }}':str(len(nodes)),
+        '{{ node_count }}': str(len(nodes)),
+        '{{ enable_ha }}': dbcfgs['enable_ha'],
         '{{ my_nodes }}': ' -w ' + ' -w '.join(nodes)
     }
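
For reference, the change_items dict above is consumed by mod_file() in
common.py, which applies each key as a regular expression over the whole
template text. A minimal sketch of the same substitution, with the template
inlined instead of read from bashrc.template:

    import re

    template = 'export NODE_LIST="{{ node_list }}"\nexport ENABLE_HA="{{ enable_ha }}"\n'
    change_items = {
        '{{ node_list }}': 'n01 n02',
        '{{ enable_ha }}': 'false',
    }
    for regexp, replace in change_items.iteritems():
        template = re.sub(regexp, replace, template)
    print template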
 


[3/6] incubator-trafodion git commit: edit .rat-excludes to avoid jenkins error and other improvements

Posted by sv...@apache.org.
edit .rat-excludes to avoid jenkins error and other improvements


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/a8097140
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/a8097140
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/a8097140

Branch: refs/heads/master
Commit: a8097140f09a8587092fb62a5da5c08d7291e9ba
Parents: 904b53d
Author: Eason <hf...@gmail.com>
Authored: Wed Nov 2 15:43:10 2016 +0800
Committer: Eason <hf...@gmail.com>
Committed: Wed Nov 2 15:43:10 2016 +0800

----------------------------------------------------------------------
 install/python-installer/README.md       | 2 +-
 install/python-installer/common.py       | 5 ++---
 install/python-installer/copy_files.py   | 4 ++--
 install/python-installer/db_install.py   | 4 ----
 install/python-installer/traf_dep.py     | 9 ---------
 install/python-installer/traf_package.py | 3 ++-
 install/python-installer/wrapper.py      | 8 ++++----
 7 files changed, 11 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a8097140/install/python-installer/README.md
----------------------------------------------------------------------
diff --git a/install/python-installer/README.md b/install/python-installer/README.md
index e60a331..feadaa2 100644
--- a/install/python-installer/README.md
+++ b/install/python-installer/README.md
@@ -6,7 +6,7 @@
 - /etc/hosts contains hostname info for all Trafodion nodes on **installer node**
 - python version 2.6/2.7, and python library `httplib2`, `prettytable`
 - Trafodion server package file is stored on **installer node**
-- Passwordless SSH login, two ways:
+- Passwordless SSH login, one of these two options is needed:
  - Set SSH key pairs against **installer node** and Trafodion nodes
  - Install `sshpass` tool on **installer node**, then input the SSH password during installation using `--enable-pwd` option
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a8097140/install/python-installer/common.py
----------------------------------------------------------------------
diff --git a/install/python-installer/common.py b/install/python-installer/common.py
index 3713da8..97b93f2 100644
--- a/install/python-installer/common.py
+++ b/install/python-installer/common.py
@@ -39,7 +39,6 @@ except ImportError:
 from ConfigParser import ConfigParser
 from collections import defaultdict
 
-__VERSION__ = 'v1.0.0'
 INSTALLER_LOC = sys.path[0]
 
 USER_PROMPT_FILE = INSTALLER_LOC + '/prompt.json'
@@ -50,7 +49,7 @@ MODCFG_FILE = INSTALLER_LOC + '/mod_cfgs.json'
 DBCFG_FILE = INSTALLER_LOC + '/db_config'
 DBCFG_TMP_FILE = INSTALLER_LOC + '/.db_config_temp'
 
-TMP_DIR = '/tmp/.install'
+TMP_DIR = '/tmp/.trafodion_install_temp'
 MARK = '[ERR]'
 
 def version():
@@ -99,7 +98,7 @@ def run_cmd(cmd):
     return stdout.strip()
 
 def run_cmd_as_user(user, cmd):
-    return run_cmd('sudo su - %s -c \'%s\'' % (user, cmd))
+    return run_cmd('sudo -n su - %s -c \'%s\'' % (user, cmd))
 
 def cmd_output(cmd):
     """ return command output but not check return value """

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a8097140/install/python-installer/copy_files.py
----------------------------------------------------------------------
diff --git a/install/python-installer/copy_files.py b/install/python-installer/copy_files.py
index 73d0237..23b5dd9 100755
--- a/install/python-installer/copy_files.py
+++ b/install/python-installer/copy_files.py
@@ -37,8 +37,8 @@ def run(pwd):
     traf_package = dbcfgs['traf_package']
 
     key_file = '/tmp/id_rsa'
-    run_cmd('sudo rm -rf %s*' % key_file)
-    run_cmd('sudo echo -e "y" | ssh-keygen -t rsa -N "" -f %s' % key_file)
+    run_cmd('sudo -n rm -rf %s*' % key_file)
+    run_cmd('sudo -n echo -e "y" | ssh-keygen -t rsa -N "" -f %s' % key_file)
 
     files = [key_file, key_file+'.pub', traf_package]
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a8097140/install/python-installer/db_install.py
----------------------------------------------------------------------
diff --git a/install/python-installer/db_install.py b/install/python-installer/db_install.py
index 9d88f8d..6ad676e 100755
--- a/install/python-installer/db_install.py
+++ b/install/python-installer/db_install.py
@@ -548,8 +548,6 @@ def get_options():
                       help="Install Trafodion on top of Apache Hadoop.")
     parser.add_option("--offline", action="store_true", dest="offline", default=False,
                       help="Enable local repository for offline installing Trafodion.")
-    parser.add_option("--version", action="store_true", dest="version", default=False,
-                      help="Show the installer version.")
 
     (options, args) = parser.parse_args()
     return options
@@ -562,8 +560,6 @@ def main():
     # handle parser option
     options = get_options()
 
-    if options.version: version()
-
     if options.build and options.cfgfile:
         log_err('Wrong parameter, cannot specify both --build and --config-file')
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a8097140/install/python-installer/traf_dep.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_dep.py b/install/python-installer/traf_dep.py
index 76f570c..81fb7e0 100755
--- a/install/python-installer/traf_dep.py
+++ b/install/python-installer/traf_dep.py
@@ -30,15 +30,6 @@ import json
 import platform
 from common import run_cmd, cmd_output, err
 
-# not used
-EPEL_REPO = """
-[epel]
-name=Extra Packages for Enterprise Linux $releasever - $basearch
-mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch
-enabled=1
-gpgcheck=0
-"""
-
 LOCAL_REPO_PTR = """
 [traflocal]
 baseurl=http://%s:%s/

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a8097140/install/python-installer/traf_package.py
----------------------------------------------------------------------
diff --git a/install/python-installer/traf_package.py b/install/python-installer/traf_package.py
index da148c5..d0dbe39 100755
--- a/install/python-installer/traf_package.py
+++ b/install/python-installer/traf_package.py
@@ -36,8 +36,9 @@ def run():
     TRAF_PACKAGE_FILE = '/tmp/' + dbcfgs['traf_package'].split('/')[-1]
     run_cmd('mkdir -p %s' % TRAF_DIR)
     run_cmd('tar xf %s -C %s' % (TRAF_PACKAGE_FILE, TRAF_DIR))
+    run_cmd('rm -rf %s' % TRAF_PACKAGE_FILE)
 
-    print 'Trafodion package uncompressed successfully!'
+    print 'Trafodion package extracted successfully!'
 
 # main
 try:

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a8097140/install/python-installer/wrapper.py
----------------------------------------------------------------------
diff --git a/install/python-installer/wrapper.py b/install/python-installer/wrapper.py
index f2f0ac8..c7a81a7 100644
--- a/install/python-installer/wrapper.py
+++ b/install/python-installer/wrapper.py
@@ -53,19 +53,19 @@ class RemoteRun(Remote):
 
     def __del__(self):
         # clean up
-        self.__run_ssh('sudo rm -rf %s' % TMP_DIR)
+        self.__run_ssh('sudo -n rm -rf %s' % TMP_DIR)
 
     def run_script(self, script, run_user, json_string, verbose=False):
         """ @param run_user: run the script with this user """
 
         if run_user:
-            # format string in order to run with 'sudo su $user -c $cmd'
+            # format string in order to run with 'sudo -n su $user -c $cmd'
             json_string = json_string.replace('"', '\\\\\\"').replace(' ', '').replace('{', '\\{').replace('$', '\\\\\\$')
             # this command only works with shell=True
-            script_cmd = '"sudo su - %s -c \'%s/%s %s\'"' % (run_user, TMP_DIR, script, json_string)
+            script_cmd = '"sudo -n su - %s -c \'%s/%s %s\'"' % (run_user, TMP_DIR, script, json_string)
             self.__run_ssh(script_cmd, verbose=verbose, shell=True)
         else:
-            script_cmd = 'sudo %s/%s \'%s\'' % (TMP_DIR, script, json_string)
+            script_cmd = 'sudo -n %s/%s \'%s\'' % (TMP_DIR, script, json_string)
             self.__run_ssh(script_cmd, verbose=verbose)
 
         format1 = 'Host [%s]: Script [%s]: %s' % (self.host, script, self.stdout)
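
The escaping chain in run_script() is the trickiest part of the wrapper: the
JSON config string has to survive ssh, the outer double quotes, and the inner
single-quoted 'su -c' command. A standalone demonstration of what the
transformation produces (sample JSON made up):

    json_string = '{"traf_user": "trafodion", "traf_pwd": "traf123"}'
    escaped = json_string.replace('"', '\\\\\\"').replace(' ', '').replace('{', '\\{').replace('$', '\\\\\\$')
    print escaped
    # \{\\"traf_user\\":\\"trafodion\\",\\"traf_pwd\\":\\"traf123\\"}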


[2/6] incubator-trafodion git commit: [TRAFODION-1839] Trafodion Installer Evolution

Posted by sv...@apache.org.
[TRAFODION-1839] Trafodion Installer Evolution


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/904b53df
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/904b53df
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/904b53df

Branch: refs/heads/master
Commit: 904b53df5c75f0dd5b0bd84e483c7d060023a7d6
Parents: 6ce6a8e
Author: Eason <hf...@gmail.com>
Authored: Tue Nov 1 16:53:27 2016 +0800
Committer: Eason <hf...@gmail.com>
Committed: Tue Nov 1 16:53:27 2016 +0800

----------------------------------------------------------------------
 install/python-installer/README.md              |  21 +
 install/python-installer/apache_mods.py         |  72 +++
 install/python-installer/bashrc.template        |  78 +++
 install/python-installer/common.py              | 479 ++++++++++++++
 install/python-installer/copy_files.py          |  64 ++
 install/python-installer/db_config_default      | 120 ++++
 install/python-installer/db_install.py          | 643 +++++++++++++++++++
 install/python-installer/dcs_setup.py           | 114 ++++
 install/python-installer/discovery.py           | 144 +++++
 install/python-installer/hadoop_mods.py         | 203 ++++++
 install/python-installer/hdfs_cmds.py           |  68 ++
 install/python-installer/mod_cfgs.json          |  73 +++
 install/python-installer/prompt.json            | 227 +++++++
 install/python-installer/script.json            |  87 +++
 .../traf_authentication_conf.template           |  71 ++
 install/python-installer/traf_check.py          |  87 +++
 install/python-installer/traf_dep.py            | 119 ++++
 install/python-installer/traf_discover.py       | 253 ++++++++
 install/python-installer/traf_kerberos.py       | 116 ++++
 install/python-installer/traf_ldap.py           |  73 +++
 install/python-installer/traf_package.py        |  47 ++
 install/python-installer/traf_setup.py          | 125 ++++
 install/python-installer/traf_sqconfig.py       |  74 +++
 install/python-installer/traf_start.py          |  70 ++
 install/python-installer/traf_user.py           | 145 +++++
 install/python-installer/version.json           |  10 +
 install/python-installer/wrapper.py             | 301 +++++++++
 27 files changed, 3884 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/README.md
----------------------------------------------------------------------
diff --git a/install/python-installer/README.md b/install/python-installer/README.md
new file mode 100644
index 0000000..e60a331
--- /dev/null
+++ b/install/python-installer/README.md
@@ -0,0 +1,21 @@
+# Apache Trafodion Python Installer
+
+## Prerequisite:
+
+- CDH/HDP is installed on the Trafodion nodes with the web UI enabled, or Apache Hadoop and HBase are installed in the same directory on all nodes
+- /etc/hosts contains hostname info for all Trafodion nodes on **installer node**
+- python version 2.6/2.7, and the python libraries `httplib2` and `prettytable`
+- Trafodion server package file is stored on **installer node**
+- Passwordless SSH login, two ways:
+ - Set SSH key pairs against **installer node** and Trafodion nodes
+ - Install `sshpass` tool on **installer node**, then input the SSH password during installation using `--enable-pwd` option
+
+> The **installer node** can be any node that can ssh to the Trafodion nodes; it can also be one of the Trafodion nodes
+
+## How to use:
+- Two ways:
+ - Simply invoke `./db_install.py` to start the installation in guided mode
+ - Copy the `db_config_default` file to `your_db_config` and modify it, then invoke `./db_install.py --config-file your_db_config` to start the installation in config mode
+- For a quick install with default settings, you only need to put the Trafodion package file in the installer's directory and provide the CDH/HDP web URL in the `your_db_config` file, then it's ready to go!
+- Use `./db_install.py --help` for more options
+- Invoke `./discovery.py` to get the system basic info on all nodes

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/apache_mods.py
----------------------------------------------------------------------
diff --git a/install/python-installer/apache_mods.py b/install/python-installer/apache_mods.py
new file mode 100755
index 0000000..17a54d5
--- /dev/null
+++ b/install/python-installer/apache_mods.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with sudo user ###
+
+import sys
+import json
+import socket
+from common import MODCFG_FILE, ParseJson, ParseXML, err, run_cmd
+
+def run():
+    dbcfgs = json.loads(dbcfgs_json)
+    if 'APACHE' in dbcfgs['distro']:
+        modcfgs = ParseJson(MODCFG_FILE).load()
+        MOD_CFGS = modcfgs['MOD_CFGS']
+
+        hdfs_xml_file = dbcfgs['hdfs_xml_file']
+        hbase_xml_file = dbcfgs['hbase_xml_file']
+
+        hbasexml = ParseXML(hbase_xml_file)
+        for key, value in MOD_CFGS['hbase-site'].items():
+            hbasexml.add_property(key, value)
+        hbasexml.write_xml()
+
+        hdfsxml = ParseXML(hdfs_xml_file)
+        for key, value in MOD_CFGS['hdfs-site'].items():
+            hdfsxml.add_property(key, value)
+        hdfsxml.write_xml()
+
+        print 'Apache Hadoop modification completed'
+        first_node = dbcfgs['first_rsnode']
+        local_host = socket.gethostname()
+        if first_node in local_host:
+            hadoop_home = dbcfgs['hadoop_home']
+            hbase_home = dbcfgs['hbase_home']
+            # stop
+            run_cmd(hbase_home + '/bin/stop-hbase.sh')
+            run_cmd(hadoop_home + '/sbin/stop-dfs.sh')
+            # start
+            run_cmd(hadoop_home + '/sbin/start-dfs.sh')
+            run_cmd(hbase_home + '/bin/start-hbase.sh')
+
+            print 'Apache Hadoop restart completed'
+    else:
+        print 'no apache distribution found, skipping'
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()
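
The ParseXML helper used above (defined in common.py) appends <property>
elements to a *-site.xml file. A minimal ElementTree sketch of the same
operation; the file path and the property name/value are placeholders, not
settings the installer actually writes:

    import xml.etree.ElementTree as ET

    # build a tiny throwaway site file to work on
    with open('/tmp/hbase-site-demo.xml', 'w') as f:
        f.write('<configuration></configuration>')

    tree = ET.parse('/tmp/hbase-site-demo.xml')
    root = tree.getroot()

    prop, name, value = ET.Element('property'), ET.Element('name'), ET.Element('value')
    name.text = 'sample.property.name'
    value.text = 'sample-value'
    prop.append(name)
    prop.append(value)
    root.append(prop)

    tree.write('/tmp/hbase-site-demo.xml')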

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/bashrc.template
----------------------------------------------------------------------
diff --git a/install/python-installer/bashrc.template b/install/python-installer/bashrc.template
new file mode 100644
index 0000000..ceed82a
--- /dev/null
+++ b/install/python-installer/bashrc.template
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+
+# This is the .bashrc for the Trafodion environment
+#
+#-------------------------------------------
+# Execute the system's default .bashrc first
+#-------------------------------------------
+if [ -f /etc/bashrc ]; then
+	. /etc/bashrc
+fi
+
+cd $HOME
+
+#-------------------------------------------
+# full path of your Trafodion installation
+#-------------------------------------------
+export JAVA_HOME="{{ java_home }}"
+export SQ_HOME="{{ sq_home }}"
+export MY_SQROOT=$SQ_HOME
+
+#-------------------------------------------
+# other env vars needed by Trafodion
+#-------------------------------------------
+
+# These env vars define all nodes in the cluster
+export NODE_LIST="{{ node_list }}"
+export MY_NODES="{{ my_nodes }}"
+export node_count="{{ node_count }}"
+export HADOOP_TYPE="{{ hadoop_type }}"
+
+#-------------------------------------------
+# Execute the sqenv.sh script if it exists.
+#-------------------------------------------
+PATH=".:$PATH"
+if [ -f $MY_SQROOT/sqenv.sh ]; then
+	pushd . >/dev/null
+	cd $MY_SQROOT
+	source ./sqenv.sh
+	popd >/dev/null
+	export MANPATH=$MANPATH:$MPI_ROOT/share/man
+fi
+
+#-------------------------------------------
+# additional settings for Trafodion environment
+#-------------------------------------------
+ETC_SECURITY_MSG="***ERROR: To fix this please configure /etc/security/limits.conf properly on $HOSTNAME."
+
+# set core file size
+ulimit -c unlimited
+
+# set max open files
+ulimit -n 32768
+if [ $? -ne 0 ]; then
+    echo "***ERROR: Unable to set max open files. Current value $(ulimit -n)"
+    echo $ETC_SECURITY_MSG
+fi

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/common.py
----------------------------------------------------------------------
diff --git a/install/python-installer/common.py b/install/python-installer/common.py
new file mode 100644
index 0000000..3713da8
--- /dev/null
+++ b/install/python-installer/common.py
@@ -0,0 +1,479 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### The common functions ###
+
+import os
+import pty
+import sys
+import json
+import re
+import time
+import base64
+import subprocess
+import logging
+try:
+    import xml.etree.cElementTree as ET
+except ImportError:
+    import xml.etree.ElementTree as ET
+from ConfigParser import ConfigParser
+from collections import defaultdict
+
+__VERSION__ = 'v1.0.0'
+INSTALLER_LOC = sys.path[0]
+
+USER_PROMPT_FILE = INSTALLER_LOC + '/prompt.json'
+SCRCFG_FILE = INSTALLER_LOC + '/script.json'
+VERSION_FILE = INSTALLER_LOC + '/version.json'
+MODCFG_FILE = INSTALLER_LOC + '/mod_cfgs.json'
+
+DBCFG_FILE = INSTALLER_LOC + '/db_config'
+DBCFG_TMP_FILE = INSTALLER_LOC + '/.db_config_temp'
+
+TMP_DIR = '/tmp/.install'
+MARK = '[ERR]'
+
+def version():
+    print 'Installer version: %s' % __VERSION__
+    exit(0)
+
+def ok(msg):
+    print '\n\33[32m***[OK]: %s \33[0m' % msg
+
+def info(msg):
+    print '\n\33[33m***[INFO]: %s \33[0m' % msg
+
+def err_m(msg):
+    """ used by main script """
+    sys.stderr.write('\n\33[31m***[ERROR]: %s \33[0m\n' % msg)
+    sys.exit(1)
+
+def err(msg):
+    """ used by sub script """
+    sys.stderr.write(MARK + msg)
+    sys.exit(1)
+
+def get_logger(log_file):
+
+    log_dir = os.path.dirname(log_file)
+    if not os.path.exists(log_dir): os.mkdir(log_dir)
+
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+
+    formatter = logging.Formatter('[%(asctime)s %(levelname)s]: %(message)s')
+
+    fh = logging.FileHandler(log_file)
+    fh.setFormatter(formatter)
+
+    logger.addHandler(fh)
+
+    return logger
+
+def run_cmd(cmd):
+    """ check command return value and return stdout """
+    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    stdout, stderr = p.communicate()
+    if p.returncode != 0:
+        err('Failed to run command %s: %s' % (cmd, stderr))
+    return stdout.strip()
+
+def run_cmd_as_user(user, cmd):
+    return run_cmd('sudo su - %s -c \'%s\'' % (user, cmd))
+
+def cmd_output(cmd):
+    """ return command output but not check return value """
+    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    stdout, stderr = p.communicate()
+
+    return stdout.strip() if stdout else stderr
+
+def mod_file(template_file, change_items):
+    """
+        @params: change_items: a dict includes:
+        {regular_expression : replace_string}
+    """
+    try:
+        with open(template_file, 'r') as f:
+            lines = f.read()
+    except IOError:
+        err('Failed to open file %s to modify' % template_file)
+
+    for regexp, replace in change_items.iteritems():
+        lines = re.sub(regexp, replace, lines)
+
+    with open(template_file, 'w') as f:
+        f.write(lines)
+
+def append_file(template_file, string, position=''):
+    try:
+        with open(template_file, 'r') as f:
+            lines = f.readlines()
+        pos = 0
+        if position:
+            for index, line in enumerate(lines):
+                if position in line:
+                    pos = index + 1
+
+        if pos == 0: pos = len(lines)
+        newlines = lines[:pos] + [string + '\n'] + lines[pos:]
+        if not string in lines:
+            with open(template_file, 'w') as f:
+                f.writelines(newlines)
+    except IOError:
+        err('Failed to open file %s to append' % template_file)
+
+
+def write_file(template_file, string):
+    try:
+        with open(template_file, 'w') as f:
+            f.write(string)
+    except IOError:
+        err('Failed to open file %s to write' % template_file)
+
+
+class Version(object):
+    def __init__(self):
+        self.support_ver = ParseJson(VERSION_FILE).load()
+
+    def get_version(self, component):
+        if self.support_ver[component] == '':
+            err('Failed to get version info for "%s" from config file' % component)
+
+        return self.support_ver[component]
+
+class Remote(object):
+    """
+        copy files to/fetch files from remote host using ssh
+        can also use paramiko, but it's not a built-in module
+    """
+
+    def __init__(self, host, user='', pwd=''):
+        self.host = host
+        self.user = user
+        self.rc = 0
+        self.pwd = pwd
+        self.sshpass = self._sshpass_available()
+
+    @staticmethod
+    def _sshpass_available():
+        sshpass_available = True
+        try:
+            p = subprocess.Popen(['sshpass'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            p.communicate()
+        except OSError:
+            sshpass_available = False
+
+        return sshpass_available
+
+    def _commands(self, method):
+        cmd = []
+        if self.sshpass and self.pwd: cmd = ['sshpass', '-p', self.pwd]
+        cmd += [method]
+        if not (self.sshpass and self.pwd): cmd += ['-oPasswordAuthentication=no']
+        return cmd
+
+    def _execute(self, cmd, verbose=False, shell=False):
+        try:
+            if verbose: print 'cmd:', cmd
+
+            master, slave = pty.openpty()
+            if shell:
+                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+            else:
+                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+            self.stdout, self.stderr = p.communicate()
+            if p.returncode:
+                self.rc = p.returncode
+                # 'ssh -tt' will overwrite stderr, so manually handle it
+                if MARK in self.stdout:
+                    self.stdout, self.stderr = self.stdout.split(MARK)
+                else:
+                    self.stderr = self.stdout
+        except Exception as e:
+            err_m('Failed to run commands on remote host: %s' % e)
+
+    def execute(self, user_cmd):
+        cmd = self._commands('ssh')
+        if self.user:
+            cmd += ['%s@%s' % (self.user, self.host)]
+        else:
+            cmd += [self.host]
+
+        cmd += user_cmd.split()
+        self._execute(cmd)
+
+    def copy(self, files, remote_folder='.'):
+        """ copy file to user's home folder """
+        for f in files:
+            if not os.path.exists(f):
+                err_m('Copy file error: %s doesn\'t exist' % f)
+
+        cmd = self._commands('scp')
+        cmd += ['-r']
+        cmd += files # files should be full path
+        if self.user:
+            cmd += ['%s@%s:%s/' % (self.user, self.host, remote_folder)]
+        else:
+            cmd += ['%s:%s/' % (self.host, remote_folder)]
+
+        self._execute(cmd)
+        if self.rc != 0: err('Failed to copy files to remote nodes')
+
+    def fetch(self, files, local_folder='.'):
+        """ fetch file from user's home folder """
+        cmd = self._commands('scp')
+        cmd += ['-r']
+        if self.user:
+            cmd += ['%s@%s:~/{%s}' % (self.user, self.host, ','.join(files))]
+        else:
+            cmd += ['%s:~/{%s}' % (self.host, ','.join(files))]
+        cmd += [local_folder]
+
+        self._execute(cmd)
+        if self.rc != 0: err('Failed to fetch files from remote nodes')
+
+
+class ParseHttp(object):
+    def __init__(self, user, passwd, json_type=True):
+        # httplib2 is not installed by default
+        try:
+            import httplib2
+        except ImportError:
+            err_m('Python module httplib2 is not found. Install python-httplib2 first.')
+
+        self.user = user
+        self.passwd = passwd
+        self.h = httplib2.Http(disable_ssl_certificate_validation=True)
+        self.h.add_credentials(self.user, self.passwd)
+        self.headers = {}
+        self.headers['X-Requested-By'] = 'trafodion'
+        if json_type:
+            self.headers['Content-Type'] = 'application/json'
+        self.headers['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (self.user, self.passwd)))
+
+    def _request(self, url, method, body=None):
+        try:
+            resp, content = self.h.request(url, method, headers=self.headers, body=body)
+            # return code is not 2xx
+            if not 200 <= resp.status < 300:
+                err_m('Error return code {0} on {1} request for configs'.format(resp.status, method))
+            return content
+        except Exception as exc:
+            err_m('Error on {0} request to URL {1}. Reason: {2}'.format(method, url, exc))
+
+    def get(self, url):
+        try:
+            return defaultdict(str, json.loads(self._request(url, 'GET')))
+        except ValueError:
+            err_m('Failed to get data from URL, check password if URL requires authentication')
+
+    def put(self, url, config):
+        if not isinstance(config, dict): err_m('Wrong HTTP PUT parameter, should be a dict')
+        result = self._request(url, 'PUT', body=json.dumps(config))
+        if result: return defaultdict(str, json.loads(result))
+
+    def post(self, url):
+        try:
+            return defaultdict(str, json.loads(self._request(url, 'POST')))
+        except ValueError:
+            err_m('Failed to send command to URL')
+
+
+class ParseXML(object):
+    """ handle *-site.xml with format
+        <property><name></name><value></value></property>
+    """
+    def __init__(self, xml_file):
+        self.__xml_file = xml_file
+        if not os.path.exists(self.__xml_file): err_m('Cannot find xml file %s' % self.__xml_file)
+        try:
+            self._tree = ET.parse(self.__xml_file)
+        except Exception as e:
+            err_m('Failed to parse xml: %s' % e)
+
+        self._root = self._tree.getroot()
+        self._properties = self._root.findall('property')
+        # name, value list
+        self._nvlist = [[elem.text for elem in p] for p in self._properties]
+
+    def __indent(self, elem):
+        """Return a pretty-printed XML string for the Element."""
+        if len(elem):
+            if not elem.text: elem.text = '\n' + '  '
+            if not elem.tail: elem.tail = '\n'
+            for subelem in elem:
+                self.__indent(subelem)
+        else:
+            if not elem.tail: elem.tail = '\n' + '  '
+
+    def get_property(self, name):
+        try:
+            return [x[1] for x in self._nvlist if x[0] == name][0]
+        except IndexError:  # property not found
+            return ''
+
+    def rm_property(self, name):
+        for p in self._properties:
+            if p[0].text == name:
+                self._root.remove(p)
+
+    def add_property(self, name, value):
+        # don't add property if already exists
+        if self.get_property(name): return
+
+        elem_p = ET.Element('property')
+        elem_name = ET.Element('name')
+        elem_value = ET.Element('value')
+
+        elem_name.text = name
+        elem_value.text = value
+        elem_p.append(elem_name)
+        elem_p.append(elem_value)
+
+        self._nvlist.append([name, value])
+        self._root.append(elem_p)
+
+    def write_xml(self):
+        self.__indent(self._root)
+        self._tree.write(self.__xml_file)
+
+    def print_xml(self):
+        for name, value in self._nvlist:
+            print name, value
+
+class ParseJson(object):
+    def __init__(self, js_file):
+        self.__js_file = js_file
+
+    def load(self):
+        """ load json file to a dict """
+        if not os.path.exists(self.__js_file): err_m('Cannot find json file %s' % self.__js_file)
+        with open(self.__js_file, 'r') as f:
+            tmparray = f.readlines()
+        content = ''
+        for t in tmparray:
+            content += t
+
+        try:
+            return defaultdict(str, json.loads(content))
+        except ValueError:
+            err_m('No json format found in config file %s' % self.__js_file)
+
+    def save(self, dic):
+        """ save dict to json file with pretty format """
+        with open(self.__js_file, 'w') as f:
+            f.write(json.dumps(dic, indent=4))
+        return 0
+
+
+class ParseInI(object):
+    def __init__(self, ini_file):
+        self.__ini_file = ini_file
+        self.section = 'def'
+
+    def load(self):
+        """ load content from ini file and return a dict """
+        if not os.path.exists(self.__ini_file):
+            err_m('Cannot find ini file %s' % self.__ini_file)
+
+        cfgs = {}
+        cf = ConfigParser()
+        cf.read(self.__ini_file)
+
+        if not cf.has_section(self.section):
+            err_m('Cannot find the default section [%s]' % self.section)
+
+        for cfg in cf.items(self.section):
+            cfgs[cfg[0]] = cfg[1]
+
+        return defaultdict(str, cfgs)
+
+    def save(self, dic):
+        """ save a dict as an ini file """
+        cf = ConfigParser()
+        cf.add_section(self.section)
+        for key, value in dic.iteritems():
+            cf.set(self.section, key, value)
+
+        with open(self.__ini_file, 'w') as f:
+            cf.write(f)
+
+def http_start(repo_dir, repo_port):
+    info('Starting temporary python http server')
+    os.system("cd %s; python -m SimpleHTTPServer %s > /dev/null 2>&1 &" % (repo_dir, repo_port))
+
+def http_stop():
+    #info('Stopping temporary python http server')
+    os.system("ps -ef|grep SimpleHTTPServer |grep -v grep | awk '{print $2}' |xargs kill -9 >/dev/null 2>&1")
+
+def format_output(text):
+    num = len(text) + 4
+    print '*' * num
+    print '  ' + text
+    print '*' * num
+
+def expNumRe(text):
+    """
+    expand numeric regular expression to list
+    e.g. 'n[01-03],n1[0-1]': ['n01','n02','n03','n10','n11']
+    e.g. 'n[09-11].com': ['n09.com','n10.com','n11.com']
+    """
+    explist = []
+    for regex in text.split(','):
+        regex = regex.strip()
+        r = re.match(r'(.*)\[(\d+)-(\d+)\](.*)', regex)
+        if r:
+            h = r.group(1)
+            d1 = r.group(2)
+            d2 = r.group(3)
+            t = r.group(4)
+
+            convert = lambda d: str(('%0' + str(min(len(d1), len(d2))) + 'd') % d)
+            if int(d1) > int(d2): d1, d2 = d2, d1  # compare numerically, not lexically
+            explist.extend([h + convert(c) + t for c in range(int(d1), int(d2)+1)])
+
+        else:
+            # keep original value if not matched
+            explist.append(regex)
+
+    return explist
+
+def time_elapse(func):
+    """ time elapse decorator """
+    def wrapper(*args, **kwargs):
+        start_time = time.time()
+        output = func(*args, **kwargs)
+        end_time = time.time()
+        seconds = end_time - start_time
+        hours = seconds / 3600
+        seconds = seconds % 3600
+        minutes = seconds / 60
+        seconds = seconds % 60
+        print '\nTime Cost: %d hour(s) %d minute(s) %d second(s)' % (hours, minutes, seconds)
+        return output
+    return wrapper
+
+if __name__ == '__main__':
+    exit(0)
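
expNumRe() above is the utility most likely to be reused elsewhere: it expands
bracketed numeric ranges into an explicit host list. Usage, assuming common.py
is importable from the installer directory:

    from common import expNumRe

    print expNumRe('n[01-03],n1[0-1]')
    # ['n01', 'n02', 'n03', 'n10', 'n11']
    print expNumRe('n[09-11].com')
    # ['n09.com', 'n10.com', 'n11.com']
    print expNumRe('standalone-host')
    # ['standalone-host']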

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/copy_files.py
----------------------------------------------------------------------
diff --git a/install/python-installer/copy_files.py b/install/python-installer/copy_files.py
new file mode 100755
index 0000000..73d0237
--- /dev/null
+++ b/install/python-installer/copy_files.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on local node ###
+
+import sys
+import json
+from threading import Thread
+from common import Remote, run_cmd, err
+
+def run(pwd):
+    """ gen ssh key on local and copy to all nodes
+        copy traf package file from local to all nodes
+    """
+    dbcfgs = json.loads(dbcfgs_json)
+    hosts = dbcfgs['node_list'].split(',')
+    traf_package = dbcfgs['traf_package']
+
+    key_file = '/tmp/id_rsa'
+    run_cmd('sudo rm -rf %s*' % key_file)
+    run_cmd('sudo echo -e "y" | ssh-keygen -t rsa -N "" -f %s' % key_file)
+
+    files = [key_file, key_file+'.pub', traf_package]
+
+    remote_insts = [Remote(h, pwd=pwd) for h in hosts]
+    threads = [Thread(target=r.copy, args=(files, '/tmp')) for r in remote_insts]
+    for thread in threads: thread.start()
+    for thread in threads: thread.join()
+    for r in remote_insts:
+        if r.rc != 0: err('Failed to copy files to %s' % r.host)
+
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+
+try:
+    pwd = sys.argv[2]
+except IndexError:
+    pwd = ''
+
+run(pwd)
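
The Remote class used here (from common.py) prefixes scp with sshpass when a
password is supplied, and forbids password prompts otherwise. A sketch of the
command list it ends up building (hypothetical host and password; the real
code also probes whether sshpass is installed):

    def build_scp_cmd(host, files, user='', pwd='', sshpass_available=True):
        """ mirrors Remote._commands + Remote.copy, for illustration only """
        cmd = []
        if sshpass_available and pwd:
            cmd = ['sshpass', '-p', pwd]
        cmd += ['scp']
        if not (sshpass_available and pwd):
            cmd += ['-oPasswordAuthentication=no']
        cmd += ['-r'] + files
        if user:
            cmd += ['%s@%s:/tmp/' % (user, host)]
        else:
            cmd += ['%s:/tmp/' % host]
        return cmd

    print build_scp_cmd('node01', ['/tmp/id_rsa', '/tmp/id_rsa.pub'], pwd='secret')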

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/db_config_default
----------------------------------------------------------------------
diff --git a/install/python-installer/db_config_default b/install/python-installer/db_config_default
new file mode 100644
index 0000000..c4a2c4e
--- /dev/null
+++ b/install/python-installer/db_config_default
@@ -0,0 +1,120 @@
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+[def]
+# NOTICE: if you are using a CDH/HDP hadoop distro,
+# you only need to specify the management url address for a quick install
+
+##################################
+# Common Settings
+##################################
+
+# trafodion username and password
+traf_user = trafodion
+traf_pwd = traf123
+
+# java(JDK) path used by trafodion on the trafodion nodes
+# if not provided, installer will auto detect installed JDK
+java_home =
+
+# cloudera/ambari management url (e.g. http://192.168.0.1:7180 or just 192.168.0.1)
+# if 'http' or 'https' prefix is not provided, the default one is 'http'
+# if port is not provided, the default port is cloudera port '7180'
+mgr_url =
+# user name for cloudera/ambari management url
+mgr_user = admin
+# password for cloudera/ambari management url
+mgr_pwd = admin
+# set the cluster number if multiple clusters are managed by one Cloudera Manager
+# ignore it if only one cluster is being managed
+cluster_no = 1
+
+# trafodion tar package file location
+# no need to provide it if the package can be found in current installer's directory
+traf_package =
+
+# the number of dcs servers on each node
+dcs_cnt_per_node = 4
+
+# scratch file location, separated by comma if more than one
+scratch_locs = $MY_SQROOT/tmp
+
+# start trafodion instance after installation completed
+traf_start = Y
+
+
+##################################
+# DCS HA configuration
+##################################
+
+# set it to 'Y' if enable DCS HA
+dcs_ha = N
+# if HA is enabled, provide the floating ip, network interface and the hostnames of the backup dcs master nodes
+dcs_floating_ip =
+# network interface that dcs used
+dcs_interface =
+# backup dcs master nodes, separated by comma if more than one
+dcs_backup_nodes =
+
+
+##################################
+# Offline installation setting
+##################################
+
+# set offline mode to Y if no internet connection
+offline_mode = N
+# if offline mode is set, you must provide a local repository directory with all needed RPMs
+local_repo_dir =
+
+
+##################################
+# LDAP security configuration
+##################################
+
+# set it to 'Y' if enable LDAP security
+ldap_security = N
+# LDAP user name and password to be assigned as DB admin privilege
+db_admin_user = admin
+db_admin_pwd = traf123
+# LDAP user to be assigned DB root privileges (DB__ROOT)
+db_root_user = trafodion
+# if LDAP security is enabled, provide the following items
+ldap_hosts =
+# 389 for no encryption or TLS, 636 for SSL
+ldap_port = 389
+ldap_identifiers =
+ldap_encrypt = 0
+ldap_certpath =
+# provide these if available
+ladp_user =
+ladp_pwd =
+
+##################################
+# Kerberos security configuration
+##################################
+# if kerberos is enabled in your hadoop system, provide below info
+
+# KDC server address
+kdc_server =
+# include realm, e.g. admin/admin@EXAMPLE.COM
+admin_principal =
+# admin password for the admin principal; it is used to create the trafodion user's principal and keytab
+kdcadmin_pwd =
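
The file above is plain INI with a single [def] section; ParseInI in common.py
reads it with ConfigParser. The same read, stripped to its essentials (the
path 'your_db_config' is assumed to be your edited copy):

    from ConfigParser import ConfigParser

    cf = ConfigParser()
    cf.read('your_db_config')
    cfgs = dict(cf.items('def'))  # option names come back lower-cased
    print cfgs['traf_user'], cfgs['dcs_cnt_per_node']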

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/db_install.py
----------------------------------------------------------------------
diff --git a/install/python-installer/db_install.py b/install/python-installer/db_install.py
new file mode 100755
index 0000000..9d88f8d
--- /dev/null
+++ b/install/python-installer/db_install.py
@@ -0,0 +1,643 @@
+#!/usr/bin/env python
+# -*- coding: utf8 -*-
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+import os
+import re
+import socket
+import json
+import getpass
+import time
+import sys
+reload(sys)
+sys.setdefaultencoding("utf-8")
+from optparse import OptionParser
+from glob import glob
+from collections import defaultdict
+import wrapper
+try:
+    from prettytable import PrettyTable
+except ImportError:
+    print 'Python module prettytable is not found. Install python-prettytable first.'
+    exit(1)
+from common import *
+
+# init global cfgs for user input
+cfgs = defaultdict(str)
+
+class HadoopDiscover(object):
+    """ discover for hadoop related info """
+    def __init__(self, user, pwd, url, cluster_name):
+        self.rsnodes = []
+        self.users = {}
+        self.cluster_name = cluster_name
+        self.hg = ParseHttp(user, pwd)
+        self.v1_url = '%s/api/v1/clusters' % url
+        self.v6_url = '%s/api/v6/clusters' % url
+        self.cluster_url = '%s/%s' % (self.v1_url, cluster_name.replace(' ', '%20'))
+        self._get_distro()
+        self._check_version()
+
+    def _get_distro(self):
+        content = self.hg.get(self.v1_url)
+
+        if content['items'][0].has_key('name'):
+            # use v6 rest api for CDH to get fullversion
+            content = self.hg.get(self.v6_url)
+
+        # loop all managed clusters
+        for cluster in content['items']:
+            try:
+                # HDP
+                self.distro = cluster['Clusters']['version']
+            except KeyError:
+                # CDH
+                try:
+                    if self.cluster_name == cluster['displayName']:
+                        self.distro = 'CDH' + cluster['fullVersion']
+                        break
+                except KeyError:
+                    log_err('Failed to get hadoop distribution info from management url')
+
+    def get_hdfs_srvname(self):
+        return self._get_service_name('HDFS')
+
+    def get_hbase_srvname(self):
+        return self._get_service_name('HBASE')
+
+    def get_zookeeper_srvname(self):
+        return self._get_service_name('ZOOKEEPER')
+
+    def _get_service_name(self, service):
+        # CDH uses different service names in multiple clusters
+        if 'CDH' in self.distro:
+            services_cfgs = self.hg.get(self.cluster_url +'/services')
+            for item in services_cfgs['items']:
+                if item['type'] == service:
+                    return item['name']
+        else:
+            return service.lower()
+
+    def _check_version(self):
+        version = Version()
+        version_list = []  # stays empty for unrecognized distros
+        if 'CDH' in self.distro: version_list = version.get_version('cdh')
+        if 'HDP' in self.distro: version_list = version.get_version('hdp')
+
+        has_version = 0
+        for ver in version_list:
+            if ver in self.distro: has_version = 1
+
+        if not has_version:
+            log_err('Sorry, Trafodion currently doesn\'t support %s' % self.distro)
+
+    def get_hadoop_users(self):
+        if 'CDH' in self.distro:
+            self._get_cdh_users()
+        elif 'HDP' in self.distro or 'BigInsights' in self.distro:
+            self._get_hdp_users()
+        return self.users
+
+    def _get_hdp_users(self):
+        desired_cfg = self.hg.get('%s/?fields=Clusters/desired_configs' % (self.cluster_url))
+        config_type = {'hbase-env':'hbase_user', 'hadoop-env':'hdfs_user'}
+        for key, value in config_type.items():
+            desired_tag = desired_cfg['Clusters']['desired_configs'][key]['tag']
+            current_cfg = self.hg.get('%s/configurations?type=%s&tag=%s' % (self.cluster_url, key, desired_tag))
+            self.users[value] = current_cfg['items'][0]['properties'][value]
+
+    def _get_cdh_users(self):
+        def _get_username(service_name, hadoop_type):
+            cfg = self.hg.get('%s/services/%s/config' % (self.cluster_url, service_name))
+            if cfg.has_key('items'):
+                for item in cfg['items']:
+                    if item['name'] == 'process_username':
+                        return item['value']
+            return hadoop_type
+
+        hdfs_user = _get_username(self.get_hdfs_srvname(), 'hdfs')
+        hbase_user = _get_username(self.get_hbase_srvname(), 'hbase')
+
+        self.users = {'hbase_user':hbase_user, 'hdfs_user':hdfs_user}
+
+    def get_rsnodes(self):
+        if 'CDH' in self.distro:
+            self._get_rsnodes_cdh()
+        elif 'HDP' in self.distro or 'BigInsights' in self.distro:
+            self._get_rsnodes_hdp()
+
+        self.rsnodes.sort()
+        # use short hostname
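+        # e.g. 'node-1.example.com' becomes 'node-1'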
+        try:
+            self.rsnodes = [re.match(r'([\w\-]+).*', node).group(1) for node in self.rsnodes]
+
+        except AttributeError:
+            pass
+        return self.rsnodes
+
+    def _get_rsnodes_cdh(self):
+        """ get list of HBase RegionServer nodes in CDH """
+        cm = self.hg.get('%s/api/v6/cm/deployment' % cfgs['mgr_url'])
+
+        hostids = []
+        for c in cm['clusters']:
+            if c['displayName'] == self.cluster_name:
+                for s in c['services']:
+                    if s['type'] == 'HBASE':
+                        for r in s['roles']:
+                            if r['type'] == 'REGIONSERVER': hostids.append(r['hostRef']['hostId'])
+        for i in hostids:
+            for h in cm['hosts']:
+                if i == h['hostId']: self.rsnodes.append(h['hostname'])
+
+    def _get_rsnodes_hdp(self):
+        """ get list of HBase RegionServer nodes in HDP """
+        hdp = self.hg.get('%s/services/HBASE/components/HBASE_REGIONSERVER' % self.cluster_url)
+        self.rsnodes = [c['HostRoles']['host_name'] for c in hdp['host_components']]
+
+
+class UserInput(object):
+    def __init__(self, options, pwd):
+        self.in_data = ParseJson(USER_PROMPT_FILE).load()
+        self.pwd = pwd
+
+    def _basic_check(self, name, answer):
+        isYN = self.in_data[name].has_key('isYN')
+        isdigit = self.in_data[name].has_key('isdigit')
+        isexist = self.in_data[name].has_key('isexist')
+        isremote_exist = self.in_data[name].has_key('isremote_exist')
+        isIP = self.in_data[name].has_key('isIP')
+        isuser = self.in_data[name].has_key('isuser')
+
+        # basic validation of the answer value
+        answer = answer.rstrip()
+        if answer:
+            if isYN:
+                answer = answer.upper()
+                if answer != 'Y' and answer != 'N':
+                    log_err('Invalid parameter for %s, should be \'Y|y|N|n\'' % name)
+            elif isdigit:
+                if not answer.isdigit():
+                    log_err('Invalid parameter for %s, should be a number' % name)
+            elif isexist:
+                if not os.path.exists(answer):
+                    log_err('%s path \'%s\' doesn\'t exist' % (name, answer))
+            elif isremote_exist:
+                hosts = cfgs['node_list'].split(',')
+                remotes = [Remote(host, pwd=self.pwd) for host in hosts]
+
+                nodes = ''
+                for remote in remotes:
+                    # check if directory exists on remote host
+                    remote.execute('ls %s 2>&1 >/dev/null' % answer)
+                    if remote.rc != 0:
+                        nodes += ' ' + remote.host
+                if nodes:
+                    log_err('%s path \'%s\' doesn\'t exist on node(s) \'%s\'' % (name, answer, nodes))
+            elif isIP:
+                try:
+                    socket.inet_pton(socket.AF_INET, answer)
+                except:
+                    log_err('Invalid IP address \'%s\'' % answer)
+            elif isuser:
+                match = re.match(r'\w+', answer)
+                if not match or match.group() != answer:
+                    log_err('Invalid user name \'%s\'' % answer)
+
+        else:
+            log_err('Empty value for \'%s\'' % name)
+
+    def _handle_prompt(self, name, user_defined):
+        prompt = self.in_data[name]['prompt']
+        default = user_defined
+
+        if (not default) and self.in_data[name].has_key('default'):
+            default = self.in_data[name]['default']
+
+        ispasswd = self.in_data[name].has_key('ispasswd')
+        isYN = self.in_data[name].has_key('isYN')
+
+        # no default value for password
+        if ispasswd: default = ''
+
+        if isYN:
+            prompt = prompt + ' (Y/N) '
+
+        if default:
+            prompt = prompt + ' [' + default + ']: '
+        else:
+            prompt = prompt + ': '
+
+        # no default value for password
+        if ispasswd:
+            orig = getpass.getpass(prompt)
+            confirm = getpass.getpass('Confirm ' + prompt)
+            if orig == confirm:
+                answer = confirm
+            else:
+                log_err('Password mismatch')
+        else:
+            try:
+                answer = raw_input(prompt)
+            except UnicodeEncodeError:
+                log_err('Character Encode error, check user input')
+            if not answer and default: answer = default
+
+        return answer
+
+    def get_input(self, name, user_defined='', prompt_mode=True):
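+        """ prompt for a config item, or validate its existing value in non-prompt mode """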
+        if self.in_data.has_key(name):
+            if prompt_mode:
+                # save configs to global dict
+                cfgs[name] = self._handle_prompt(name, user_defined)
+
+            # check basic values from global configs
+            self._basic_check(name, cfgs[name])
+        else:
+            # should never reach here, just in case
+            log_err('Invalid prompt')
+
+    def get_confirm(self):
+        answer = raw_input('Confirm result (Y/N) [N]: ')
+        if not answer: answer = 'N'
+
+        answer = answer.upper()
+        if answer != 'Y' and answer != 'N':
+            log_err('Invalid parameter, should be \'Y|y|N|n\'')
+        return answer
+
+    def notify_user(self):
+        """ show the final configs to user """
+        format_output('Final Configs')
+        title = ['config type', 'value']
+        pt = PrettyTable(title)
+        for item in title:
+            pt.align[item] = 'l'
+
+        for key, value in sorted(cfgs.items()):
+            if self.in_data.has_key(key) and value:
+                if self.in_data[key].has_key('ispasswd'): continue
+                pt.add_row([key, value])
+        print pt
+        confirm = self.get_confirm()
+        if confirm != 'Y':
+            if os.path.exists(DBCFG_FILE): os.remove(DBCFG_FILE)
+            log_err('User quit')
+
+
+def log_err(errtext):
+    # save tmp config files
+    tp = ParseInI(DBCFG_TMP_FILE)
+    tp.save(cfgs)
+    err_m(errtext)
+
+
+def user_input(options, prompt_mode=True, pwd=''):
+    """ get user's input and check input value """
+    global cfgs
+
+    apache = bool(getattr(options, 'apache', False))
+    offline = bool(getattr(options, 'offline', False))
+    silent = bool(getattr(options, 'silent', False))
+
+    # load from temp config file if in prompt mode
+    if os.path.exists(DBCFG_TMP_FILE) and prompt_mode:
+        tp = ParseInI(DBCFG_TMP_FILE)
+        cfgs = tp.load()
+
+    u = UserInput(options, pwd)
+    g = lambda n: u.get_input(n, cfgs[n], prompt_mode=prompt_mode)
+
+    ### begin user input ###
+    if apache:
+        g('node_list')
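+        # expNumRe expands numeric ranges, e.g. 'n[01-12]' -> n01, n02, ..., n12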
+        node_lists = expNumRe(cfgs['node_list'])
+
+        # check if node list is expanded successfully
+        if any('[' in node for node in node_lists):
+            log_err('Failed to expand node list, please check your input.')
+        cfgs['node_list'] = ','.join(node_lists)
+        g('hadoop_home')
+        g('hbase_home')
+        g('hive_home')
+        g('hdfs_user')
+        g('hbase_user')
+        g('first_rsnode')
+        cfgs['distro'] = 'APACHE'
+    else:
+        g('mgr_url')
+        if not ('http:' in cfgs['mgr_url'] or 'https:' in cfgs['mgr_url']):
+            cfgs['mgr_url'] = 'http://' + cfgs['mgr_url']
+
+        # set cloudera default port 7180 if not provided by user
+        if not re.search(r':\d+', cfgs['mgr_url']):
+            cfgs['mgr_url'] += ':7180'
+
+        g('mgr_user')
+        g('mgr_pwd')
+
+        validate_url_v1 = '%s/api/v1/clusters' % cfgs['mgr_url']
+        content = ParseHttp(cfgs['mgr_user'], cfgs['mgr_pwd']).get(validate_url_v1)
+
+        # currently only CDH supports multiple clusters,
+        # so if this condition is true it must be a CDH cluster
+        if len(content['items']) > 1:
+            cluster_names = []
+            # loop all managed clusters
+            for cluster in content['items']:
+                cluster_names.append(cluster['name'])
+
+            for index, name in enumerate(cluster_names):
+                print str(index + 1) + '. ' + name
+            g('cluster_no')
+            c_index = int(cfgs['cluster_no']) - 1
+            if c_index < 0 or c_index >= len(cluster_names):
+                log_err('Incorrect number')
+            cluster_name = cluster_names[c_index]
+        else:
+            try:
+                cluster_name = content['items'][0]['name']
+            except (IndexError, KeyError):
+                cluster_name = content['items'][0]['Clusters']['cluster_name']
+
+        discover = HadoopDiscover(cfgs['mgr_user'], cfgs['mgr_pwd'], cfgs['mgr_url'], cluster_name)
+        rsnodes = discover.get_rsnodes()
+        hadoop_users = discover.get_hadoop_users()
+
+        cfgs['distro'] = discover.distro
+        cfgs['hbase_service_name'] = discover.get_hbase_srvname()
+        cfgs['hdfs_service_name'] = discover.get_hdfs_srvname()
+        cfgs['zookeeper_service_name'] = discover.get_zookeeper_srvname()
+
+        cfgs['cluster_name'] = cluster_name.replace(' ', '%20')
+        cfgs['hdfs_user'] = hadoop_users['hdfs_user']
+        cfgs['hbase_user'] = hadoop_users['hbase_user']
+        cfgs['node_list'] = ','.join(rsnodes)
+        cfgs['first_rsnode'] = rsnodes[0] # first regionserver node
+
+    # check node connection
+    for node in cfgs['node_list'].split(','):
+        rc = os.system('ping -c 1 %s >/dev/null 2>&1' % node)
+        if rc: log_err('Cannot ping %s, please check network connection and /etc/hosts' % node)
+
+    ### discover system settings, return a dict
+    discover_results = wrapper.run(cfgs, options, mode='discover', pwd=pwd)
+
+    # check discover results, return error if any single node fails
+    need_java_home = 0
+    for result in discover_results:
+        host, content = result.items()[0]
+        content_dict = json.loads(content)
+
+        java_home = content_dict['default_java']
+        if java_home == 'N/A':
+            need_java_home += 1
+        if content_dict['linux'] == 'N/A':
+            log_err('Unsupported Linux version')
+        if content_dict['firewall_status'] == 'Running':
+            log_err('Firewall should be stopped')
+        if content_dict['traf_status'] == 'Running':
+            log_err('Trafodion process is found, please stop it first')
+        if content_dict['hbase'] == 'N/A':
+            log_err('HBase is not found')
+        if content_dict['hbase'] == 'N/S':
+            log_err('HBase version is not supported')
+
+        if content_dict['secure_hadoop'] == 'kerberos':
+            cfgs['secure_hadoop'] = 'Y'
+        else:
+            cfgs['secure_hadoop'] = 'N'
+
+    if offline:
+        g('local_repo_dir')
+        if not glob('%s/repodata' % cfgs['local_repo_dir']):
+            log_err('repodata directory not found, this is not a valid repository directory')
+        cfgs['offline_mode'] = 'Y'
+        cfgs['repo_ip'] = socket.gethostbyname(socket.gethostname())
+        cfgs['repo_port'] = '9900'
+
+    pkg_list = ['apache-trafodion']
+    # find tar file in installer folder; if more than one is found, use the first
+    for pkg in pkg_list:
+        tar_loc = glob('%s/*%s*.tar.gz' % (INSTALLER_LOC, pkg))
+        if tar_loc:
+            cfgs['traf_package'] = tar_loc[0]
+            break
+
+    g('traf_package')
+
+    # get basename and version from tar filename
+    try:
+        pattern = '|'.join(pkg_list)
+        cfgs['traf_basename'], cfgs['traf_version'] = re.search(r'.*(%s).*-(\d\.\d\.\d).*' % pattern, cfgs['traf_package']).groups()
+    except AttributeError:
+        log_err('Invalid package tar file')
+
+    #if float(cfgs['traf_version'][:3]) >= 2.2:
+    #    cfgs['req_java8'] = 'Y'
+    #else:
+    #    cfgs['req_java8'] = 'N'
+
+    g('traf_pwd')
+    g('dcs_cnt_per_node')
+    g('scratch_locs')
+    g('traf_start')
+
+    # kerberos
+    if cfgs['secure_hadoop'].upper() == 'Y':
+        g('kdc_server')
+        g('admin_principal')
+        g('kdcadmin_pwd')
+
+    # ldap security
+    g('ldap_security')
+    if cfgs['ldap_security'].upper() == 'Y':
+        g('db_root_user')
+        g('db_admin_user')
+        g('db_admin_pwd')
+        g('ldap_hosts')
+        g('ldap_port')
+        g('ldap_identifiers')
+        g('ldap_encrypt')
+        if cfgs['ldap_encrypt'] in ('1', '2'):
+            g('ldap_certpath')
+        elif cfgs['ldap_encrypt'] == '0':
+            cfgs['ldap_certpath'] = ''
+        else:
+            log_err('Invalid ldap encryption level')
+
+        g('ldap_userinfo')
+        if cfgs['ldap_userinfo'] == 'Y':
+            g('ldap_user')
+            g('ldap_pwd')
+        else:
+            cfgs['ldap_user'] = ''
+            cfgs['ldap_pwd'] = ''
+
+    # DCS HA
+    g('dcs_ha')
+    if cfgs['dcs_ha'].upper() == 'Y':
+        g('dcs_floating_ip')
+        g('dcs_interface')
+        g('dcs_backup_nodes')
+        # DCS backup nodes must be a subset of the node list
+        if not set(cfgs['dcs_backup_nodes'].split(',')).issubset(cfgs['node_list'].split(',')):
+            log_err('Invalid DCS backup nodes, please pick nodes from the node list')
+
+    if need_java_home:
+        g('java_home')
+    else:
+        # don't overwrite user input java home
+        if not cfgs['java_home']:
+            cfgs['java_home'] = java_home
+
+    # set other config to cfgs
+    if apache:
+        cfgs['hbase_xml_file'] = cfgs['hbase_home'] + '/conf/hbase-site.xml'
+        cfgs['hdfs_xml_file'] = cfgs['hadoop_home'] + '/etc/hadoop/hdfs-site.xml'
+    else:
+        cfgs['hbase_xml_file'] = '/etc/hbase/conf/hbase-site.xml'
+
+    cfgs['req_java8'] = 'N'
+    cfgs['traf_user'] = 'trafodion'
+    cfgs['config_created_date'] = time.strftime('%Y/%m/%d %H:%M %Z')
+
+    if not silent:
+        u.notify_user()
+
+def get_options():
+    usage = 'usage: %prog [options]\n'
+    usage += '  Trafodion install main script.'
+    parser = OptionParser(usage=usage)
+    parser.add_option("-c", "--config-file", dest="cfgfile", metavar="FILE",
+                      help="Json format file. If provided, all install prompts \
+                            will be taken from this file and not prompted for.")
+    parser.add_option("-u", "--remote-user", dest="user", metavar="USER",
+                      help="Specify ssh login user for remote server, \
+                            if not provided, use current login user as default.")
+    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
+                      help="Verbose mode, will print commands.")
+    parser.add_option("--silent", action="store_true", dest="silent", default=False,
+                      help="Do not ask user to confirm configuration result")
+    parser.add_option("--enable-pwd", action="store_true", dest="pwd", default=False,
+                      help="Prompt SSH login password for remote hosts. \
+                            If set, \'sshpass\' tool is required.")
+    parser.add_option("--build", action="store_true", dest="build", default=False,
+                      help="Build the config file in guided mode only.")
+    parser.add_option("--upgrade", action="store_true", dest="upgrade", default=False,
+                      help="Upgrade install, it is useful when reinstalling Trafodion.")
+    parser.add_option("--apache-hadoop", action="store_true", dest="apache", default=False,
+                      help="Install Trafodion on top of Apache Hadoop.")
+    parser.add_option("--offline", action="store_true", dest="offline", default=False,
+                      help="Enable local repository for offline installing Trafodion.")
+    parser.add_option("--version", action="store_true", dest="version", default=False,
+                      help="Show the installer version.")
+
+    (options, args) = parser.parse_args()
+    return options
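+
+# typical invocations, a sketch based on the options above ('mycfg' is a placeholder):
+#   ./db_install.py              # guided mode, prompt for all configs
+#   ./db_install.py -c mycfg     # take all configs from a saved config file
+#   ./db_install.py --build      # dry run, only generate the config file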
+
+def main():
+    """ db_installer main loop """
+    global cfgs
+    format_output('Trafodion Installation ToolKit')
+
+    # handle parser option
+    options = get_options()
+
+    if options.version: version()
+
+    if options.build and options.cfgfile:
+        log_err('Wrong parameter, cannot specify both --build and --config-file')
+
+    if options.build and options.offline:
+        log_err('Wrong parameter, cannot specify both --build and --offline')
+
+    if options.cfgfile:
+        if not os.path.exists(options.cfgfile):
+            log_err('Cannot find config file \'%s\'' % options.cfgfile)
+        config_file = options.cfgfile
+    else:
+        config_file = DBCFG_FILE
+
+    if options.pwd:
+        pwd = getpass.getpass('Input remote host SSH Password: ')
+    else:
+        pwd = ''
+
+    # build mode, or no config file specified and the default one doesn't exist
+    p = ParseInI(config_file)
+    if options.build or (not os.path.exists(config_file)):
+        if options.build: format_output('DryRun Start')
+        user_input(options, prompt_mode=True, pwd=pwd)
+
+        # save config file as json format
+        print '\n** Generating config file to save configs ... \n'
+        p.save(cfgs)
+    # config file exists
+    else:
+        print '\n** Loading configs from config file ... \n'
+        cfgs = p.load()
+        if options.offline and cfgs['offline_mode'] != 'Y':
+            log_err('To enable offline mode, must set "offline_mode = Y" in config file')
+        user_input(options, prompt_mode=False, pwd=pwd)
+
+    if options.upgrade:
+        cfgs['upgrade'] = 'Y'
+
+    if options.offline:
+        http_start(cfgs['local_repo_dir'], cfgs['repo_port'])
+    else:
+        cfgs['offline_mode'] = 'N'
+
+    if not options.build:
+        format_output('Installation Start')
+
+        ### perform actual installation ###
+        wrapper.run(cfgs, options, pwd=pwd)
+
+        format_output('Installation Complete')
+
+        if options.offline: http_stop()
+
+        # rename default config file when successfully installed
+        # so next time user can input new variables for a new install
+        # or specify the backup config file to install again
+        try:
+            # only rename default config file
+            ts = time.strftime('%y%m%d_%H%M')
+            if config_file == DBCFG_FILE and os.path.exists(config_file):
+                os.rename(config_file, config_file + '.bak' + ts)
+        except OSError:
+            log_err('Cannot rename config file')
+    else:
+        format_output('DryRun Complete')
+
+    # remove temp config file
+    if os.path.exists(DBCFG_TMP_FILE): os.remove(DBCFG_TMP_FILE)
+
+if __name__ == "__main__":
+    try:
+        main()
+    except (KeyboardInterrupt, EOFError):
+        tp = ParseInI(DBCFG_TMP_FILE)
+        tp.save(cfgs)
+        http_stop()
+        print '\nAborted...'

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/dcs_setup.py
----------------------------------------------------------------------
diff --git a/install/python-installer/dcs_setup.py b/install/python-installer/dcs_setup.py
new file mode 100755
index 0000000..818a933
--- /dev/null
+++ b/install/python-installer/dcs_setup.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on all nodes with trafodion user ###
+
+import os
+import sys
+import json
+from common import ParseXML, append_file, write_file, mod_file, cmd_output, run_cmd, err
+
+def run():
+    dbcfgs = json.loads(dbcfgs_json)
+
+    SQ_ROOT = os.environ['MY_SQROOT']
+    TRAF_VER = dbcfgs['traf_version']
+    HBASE_XML_FILE = dbcfgs['hbase_xml_file']
+
+    DCS_INSTALL_ENV = 'export DCS_INSTALL_DIR=%s/dcs-%s' % (SQ_ROOT, TRAF_VER)
+    REST_INSTALL_ENV = 'export REST_INSTALL_DIR=%s/rest-%s' % (SQ_ROOT, TRAF_VER)
+
+    DCS_CONF_DIR = '%s/dcs-%s/conf' % (SQ_ROOT, TRAF_VER)
+    DCS_SRV_FILE = DCS_CONF_DIR + '/servers'
+    DCS_MASTER_FILE = DCS_CONF_DIR + '/master'
+    DCS_BKMASTER_FILE = DCS_CONF_DIR + '/backup-masters'
+    DCS_ENV_FILE = DCS_CONF_DIR + '/dcs-env.sh'
+    DCS_SITE_FILE = DCS_CONF_DIR + '/dcs-site.xml'
+    REST_SITE_FILE = '%s/rest-%s/conf/rest-site.xml' % (SQ_ROOT, TRAF_VER)
+    TRAFCI_FILE = SQ_ROOT + '/trafci/bin/trafci'
+    SQENV_FILE = SQ_ROOT + '/sqenvcom.sh'
+
+    ### dcs setting ###
+    # servers
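+    # write one line per node: '<hostname> <dcs server count>'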
+    nodes = dbcfgs['node_list'].split(',')
+    dcs_cnt = dbcfgs['dcs_cnt_per_node']
+    dcs_servers = ''
+    for node in nodes:
+        dcs_servers += '%s %s\n' % (node, dcs_cnt)
+
+    write_file(DCS_SRV_FILE, dcs_servers)
+
+    ### modify dcs config files ###
+    # modify master
+    dcs_master = nodes[0]
+    append_file(DCS_MASTER_FILE, dcs_master)
+
+    # modify sqenvcom.sh
+    append_file(SQENV_FILE, DCS_INSTALL_ENV)
+    append_file(SQENV_FILE, REST_INSTALL_ENV)
+
+    # modify dcs-env.sh
+    mod_file(DCS_ENV_FILE, {'.*DCS_MANAGES_ZK=.*':'export DCS_MANAGES_ZK=false'})
+
+    # modify trafci
+    mod_file(TRAFCI_FILE, {'HNAME=.*':'HNAME=%s:23400' % dcs_master})
+
+    # modify dcs-site.xml
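+    # use the interface that owns the default route for dcs.dns.interface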
+    net_interface = cmd_output('netstat -rn | grep "^0.0.0.0" | awk \'{print $8}\'').strip()
+    hb = ParseXML(HBASE_XML_FILE)
+    zk_hosts = hb.get_property('hbase.zookeeper.quorum')
+    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
+
+    p = ParseXML(DCS_SITE_FILE)
+    p.add_property('dcs.zookeeper.property.clientPort', zk_port)
+    p.add_property('dcs.zookeeper.quorum', zk_hosts)
+    p.add_property('dcs.dns.interface', net_interface)
+
+    if dbcfgs['dcs_ha'] == 'Y':
+        dcs_floating_ip = dbcfgs['dcs_floating_ip']
+        dcs_backup_nodes = dbcfgs['dcs_backup_nodes']
+        p.add_property('dcs.master.floating.ip', 'true')
+        p.add_property('dcs.master.floating.ip.external.interface', net_interface)
+        p.add_property('dcs.master.floating.ip.external.ip.address', dcs_floating_ip)
+        p.rm_property('dcs.dns.interface')
+
+        # modify backup_master
+        write_file(DCS_BKMASTER_FILE, dcs_backup_nodes)
+
+    p.write_xml()
+
+    ### rest setting ###
+    p = ParseXML(REST_SITE_FILE)
+    p.add_property('rest.zookeeper.property.clientPort', zk_port)
+    p.add_property('rest.zookeeper.quorum', zk_hosts)
+    p.write_xml()
+
+    ### run sqcertgen ###
+    run_cmd('sqcertgen')
+
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/discovery.py
----------------------------------------------------------------------
diff --git a/install/python-installer/discovery.py b/install/python-installer/discovery.py
new file mode 100755
index 0000000..9f2d66e
--- /dev/null
+++ b/install/python-installer/discovery.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+import os
+import time
+import json
+import getpass
+from optparse import OptionParser
+from collections import defaultdict
+try:
+    from prettytable import PrettyTable
+except ImportError:
+    print 'Python module prettytable is not found. Install python-prettytable first.'
+    exit(1)
+from common import err_m, err, ParseInI, expNumRe, format_output, DBCFG_FILE
+import wrapper
+
+
+def get_options():
+    usage = 'usage: %prog [options]\n'
+    usage += '  Trafodion discovery main script.'
+    parser = OptionParser(usage=usage)
+    parser.add_option("-c", "--config-file", dest="cfgfile", metavar="FILE",
+                      help="Json format file. If provided, all install prompts \
+                            will be taken from this file and not prompted for.")
+    parser.add_option("-u", "--remote-user", dest="user", metavar="USER",
+                      help="Specify ssh login user for remote server, \
+                            if not provided, use current login user as default.")
+    parser.add_option("--enable-pass", action="store_true", dest="pwd", default=True,
+                      help="Not Prompt SSH login password for remote hosts.")
+
+    (options, args) = parser.parse_args()
+    return options
+
+# row format
+def output_row(results):
+    items = []
+    for result in results:
+        host, content = result.items()[0]
+        cfg_dict = json.loads(content)
+
+        cfg_tuples = sorted(cfg_dict.items())
+        title = ['Host']
+        item = [host]
+        for key, value in cfg_tuples:
+            title.append(key)
+            item.append(value)
+        items.append(item)
+
+    pt = PrettyTable(title)
+    for item in items: pt.add_row(item)
+
+    return str(pt)
+
+# column format
+def output_column(results):
+    items = []
+    for result in results:
+        host, content = result.items()[0]
+        cfg_dict = json.loads(content)
+
+        item = []
+        title = []
+        cfg_tuples = sorted(cfg_dict.items())
+        for key, value in cfg_tuples:
+            title.append(key)
+            item.append(value)
+        items.append([host, item])
+
+    pt = PrettyTable()
+    pt.add_column('Host', title)
+    for item in items:
+        pt.add_column(item[0], item[1])
+
+    return str(pt)
+
+def main():
+    options = get_options()
+
+    cfgs = defaultdict(str)
+
+    if options.cfgfile:
+        if not os.path.exists(options.cfgfile):
+            err_m('Cannot find config file \'%s\'' % options.cfgfile)
+        config_file = options.cfgfile
+    else:
+        config_file = DBCFG_FILE
+
+    if options.pwd:
+        pwd = getpass.getpass('Input remote host SSH Password: ')
+    else:
+        pwd = ''
+
+    if os.path.exists(config_file):
+        cfgs = ParseInI(config_file).load()
+    else:
+        node_lists = expNumRe(raw_input('Enter list of Nodes separated by comma, support numeric RE, e.g. n[01-12]: '))
+
+        # check if node list is expanded successfully
+        if any('[' in node for node in node_lists):
+            err('Failed to expand node list, please check your input.')
+
+        cfgs['node_list'] = ','.join(node_lists)
+
+
+    results = wrapper.run(cfgs, options, mode='discover', pwd=pwd)
+
+    format_output('Discover results')
+
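+    # row-per-host layout reads better with many hosts; host-per-column with a few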
+    if len(results) > 4:
+        output = output_row(results)
+    else:
+        output = output_column(results)
+
+    print output
+    with open('discover_result', 'w') as f:
+        f.write('Discover Date: %s\n' % time.strftime('%Y-%m-%d %H:%M'))
+        f.write(output)
+
+if __name__ == "__main__":
+    try:
+        main()
+    except (KeyboardInterrupt, EOFError):
+        print '\nAborted...'

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/hadoop_mods.py
----------------------------------------------------------------------
diff --git a/install/python-installer/hadoop_mods.py b/install/python-installer/hadoop_mods.py
new file mode 100755
index 0000000..168a81f
--- /dev/null
+++ b/install/python-installer/hadoop_mods.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on local node ###
+
+import time
+import sys
+import json
+from common import ParseHttp, ParseJson, MODCFG_FILE, err
+
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+
+dbcfgs = json.loads(dbcfgs_json)
+modcfgs = ParseJson(MODCFG_FILE).load()
+
+MOD_CFGS = modcfgs['MOD_CFGS']
+HBASE_MASTER_CONFIG = modcfgs['HBASE_MASTER_CONFIG']
+HBASE_RS_CONFIG = modcfgs['HBASE_RS_CONFIG']
+HDFS_CONFIG = modcfgs['HDFS_CONFIG']
+ZK_CONFIG = modcfgs['ZK_CONFIG']
+
+CLUSTER_URL_PTR = '%s/api/v1/clusters/%s'
+RESTART_URL_PTR = CLUSTER_URL_PTR + '/commands/restart'
+RESTART_SRV_URL_PTR = CLUSTER_URL_PTR + '/services/%s/commands/restart'
+SRVCFG_URL_PTR = CLUSTER_URL_PTR + '/services/%s/config'
+RSGRP_BASEURL_PTR = '%s/api/v6/clusters/%s/services/%s/roleConfigGroups'
+DEPLOY_CFG_URL_PTR = '%s/api/v6/clusters/%s/commands/deployClientConfig'
+CMD_STAT_URL_PTR = '%s/api/v1/commands/%s'
+
+class CDHMod(object):
+    """ Modify CDH configs for trafodion and restart CDH services """
+    def __init__(self, user, passwd, url, cluster_name):
+        self.url = url
+        self.cluster_name = cluster_name
+        self.p = ParseHttp(user, passwd)
+
+    def __retry_check(self, cid, maxcnt, interval, msg):
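+        """ poll the command status URL until it succeeds, or give up after maxcnt retries """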
+        stat_url = CMD_STAT_URL_PTR % (self.url, cid)
+        stat = self.p.get(stat_url)
+        retry_cnt = 0
+        while not (stat['success'] is True and stat['active'] is False):
+            retry_cnt += 1
+            flush_str = '.' * retry_cnt
+            print '\rCheck CDH services %s status (timeout: %dmin) %s' % (msg, maxcnt*interval/60, flush_str),
+            sys.stdout.flush()
+            time.sleep(interval)
+            stat = self.p.get(stat_url)
+            if retry_cnt == maxcnt: return False
+        return True
+
+    def mod(self):
+        hdfs_service = dbcfgs['hdfs_service_name']
+        hbase_service = dbcfgs['hbase_service_name']
+        zk_service = dbcfgs['zookeeper_service_name']
+        services = {hdfs_service:HDFS_CONFIG, hbase_service:HBASE_MASTER_CONFIG, zk_service:ZK_CONFIG}
+
+        for srv, cfg in services.iteritems():
+            srvcfg_url = SRVCFG_URL_PTR % (self.url, self.cluster_name, srv)
+            self.p.put(srvcfg_url, cfg)
+
+        # set configs in each regionserver group
+        rsgrp_baseurl = RSGRP_BASEURL_PTR % (self.url, self.cluster_name, hbase_service)
+        rscfg = self.p.get(rsgrp_baseurl)
+        rsgrp_urls = ['%s/%s/config' % (rsgrp_baseurl, r['name']) for r in rscfg['items'] if r['roleType'] == 'REGIONSERVER']
+
+        for rsgrp_url in rsgrp_urls:
+            self.p.put(rsgrp_url, HBASE_RS_CONFIG)
+
+    def restart(self):
+        restart_url = RESTART_URL_PTR % (self.url, self.cluster_name)
+        deploy_cfg_url = DEPLOY_CFG_URL_PTR % (self.url, self.cluster_name)
+
+        print 'Restarting CDH services ...'
+        rc1 = self.p.post(restart_url)
+        if self.__retry_check(rc1['id'], 40, 15, 'restart'):
+            print 'Restart CDH successfully!'
+        else:
+            err('Failed to restart CDH, max retry count reached')
+
+        rc2 = self.p.post(deploy_cfg_url)
+        if self.__retry_check(rc2['id'], 30, 10, 'deploy'):
+            print 'Deploy client config successfully!'
+        else:
+            err('Failed to deploy CDH client config, max retry count reached')
+
+
+class HDPMod(object):
+    """ Modify HDP configs for trafodion and restart HDP services """
+    def __init__(self, user, passwd, url, cluster_name):
+        self.url = url
+        self.cluster_name = cluster_name
+        self.p = ParseHttp(user, passwd, json_type=False)
+
+    def mod(self):
+        cluster_url = CLUSTER_URL_PTR % (self.url, self.cluster_name)
+        desired_cfg_url = cluster_url + '?fields=Clusters/desired_configs'
+        cfg_url = cluster_url + '/configurations?type={0}&tag={1}'
+        desired_cfg = self.p.get(desired_cfg_url)
+
+        for config_type in MOD_CFGS.keys():
+            desired_tag = desired_cfg['Clusters']['desired_configs'][config_type]['tag']
+            current_cfg = self.p.get(cfg_url.format(config_type, desired_tag))
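+            # Ambari expects a fresh, unique tag for each new desired_config version; derive one from the current time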
+            tag = 'version' + str(int(time.time() * 1000000))
+            new_properties = current_cfg['items'][0]['properties']
+            new_properties.update(MOD_CFGS[config_type])
+            config = {
+                'Clusters': {
+                    'desired_config': {
+                        'type': config_type,
+                        'tag': tag,
+                        'properties': new_properties
+                    }
+                }
+            }
+            self.p.put(cluster_url, config)
+
+
+    def restart(self):
+        srv_baseurl = CLUSTER_URL_PTR % (self.url, self.cluster_name) + '/services/'
+        srvs = ['HBASE', 'ZOOKEEPER', 'HDFS']
+
+        # Stop
+        print 'Restarting HDP services ...'
+        for srv in srvs:
+            srv_url = srv_baseurl + srv
+            config = {'RequestInfo': {'context' :'Stop %s services' % srv}, 'ServiceInfo': {'state' : 'INSTALLED'}}
+            rc = self.p.put(srv_url, config)
+
+            # check stop status
+            if rc:
+                stat = self.p.get(srv_url)
+
+                retry_cnt, maxcnt, interval = 0, 30, 5
+                while stat['ServiceInfo']['state'] != 'INSTALLED':
+                    retry_cnt += 1
+                    flush_str = '.' * retry_cnt
+                    print '\rCheck HDP service %s stop status (timeout: %dmin) %s' % (srv, maxcnt*interval/60, flush_str),
+                    sys.stdout.flush()
+                    time.sleep(interval)
+                    stat = self.p.get(srv_url)
+                    if retry_cnt == maxcnt: err('Failed to stop HDP service %s, timeout' % srv)
+                # wrap line
+                print
+            else:
+                print 'HDP service %s had already been stopped' % srv
+
+        time.sleep(5)
+        # Start
+        config = {'RequestInfo': {'context' :'Start All services'}, 'ServiceInfo': {'state' : 'STARTED'}}
+        rc = self.p.put(srv_baseurl, config)
+
+        # check start status
+        if rc:
+            result_url = rc['href']
+            stat = self.p.get(result_url)
+            retry_cnt, maxcnt, interval = 0, 120, 5
+            while stat['Requests']['request_status'] != 'COMPLETED':
+                retry_cnt += 1
+                flush_str = '.' * retry_cnt
+                print '\rCheck HDP services start status (timeout: %dmin) %s' % (maxcnt*interval/60, flush_str),
+                sys.stdout.flush()
+                time.sleep(interval)
+                stat = self.p.get(result_url)
+                if retry_cnt == maxcnt: err('Failed to start all HDP services')
+            print 'HDP services started successfully!'
+        else:
+            print 'HDP services had already been started'
+
+def run():
+    if 'CDH' in dbcfgs['distro']:
+        cdh = CDHMod(dbcfgs['mgr_user'], dbcfgs['mgr_pwd'], dbcfgs['mgr_url'], dbcfgs['cluster_name'])
+        cdh.mod()
+        cdh.restart()
+    elif 'HDP' in dbcfgs['distro']:
+        hdp = HDPMod(dbcfgs['mgr_user'], dbcfgs['mgr_pwd'], dbcfgs['mgr_url'], dbcfgs['cluster_name'])
+        hdp.mod()
+        hdp.restart()
+
+# main
+run()

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/hdfs_cmds.py
----------------------------------------------------------------------
diff --git a/install/python-installer/hdfs_cmds.py b/install/python-installer/hdfs_cmds.py
new file mode 100755
index 0000000..dc08b7e
--- /dev/null
+++ b/install/python-installer/hdfs_cmds.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+
+### this script should be run on first node with trafodion user ###
+
+import os
+import sys
+import json
+from common import err, run_cmd, cmd_output, run_cmd_as_user
+
+def run():
+    hdfs_bin = '/usr/bin/hdfs'
+
+    dbcfgs = json.loads(dbcfgs_json)
+    DISTRO = dbcfgs['distro']
+
+    if 'CDH' in DISTRO:
+        parcel_lib = '/opt/cloudera/parcels/CDH/lib/hbase/lib'
+        if os.path.exists(parcel_lib): hdfs_bin = '/opt/cloudera/parcels/CDH/bin/hdfs'
+    elif 'APACHE' in DISTRO:
+        hdfs_bin = dbcfgs['hadoop_home'] + '/bin/hdfs'
+
+    traf_loc = '/user/trafodion'
+    traf_user = dbcfgs['traf_user']
+    hdfs_user = dbcfgs['hdfs_user']
+    hbase_user = dbcfgs['hbase_user']
+
+    run_cmd_as_user(hdfs_user, '%s dfsadmin -safemode wait' % hdfs_bin)
+    run_cmd_as_user(hdfs_user, '%s dfs -mkdir -p %s/{trafodion_backups,bulkload,lobs} /hbase/archive /hbase-staging' % (hdfs_bin, traf_loc))
+    run_cmd_as_user(hdfs_user, '%s dfs -chown -R %s:%s /hbase/archive /hbase-staging' % (hdfs_bin, hbase_user, hbase_user))
+    run_cmd_as_user(hdfs_user, '%s dfs -chown -R %s:%s %s/{trafodion_backups,bulkload,lobs}' % (hdfs_bin, traf_user, traf_user, traf_loc))
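+    # grant the trafodion user rwx on /hbase/archive via HDFS ACLs, including a default ACL for newly created files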
+    run_cmd_as_user(hdfs_user, '%s dfs -setfacl -R -m user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user))
+    run_cmd_as_user(hdfs_user, '%s dfs -setfacl -R -m default:user:%s:rwx /hbase/archive' % (hdfs_bin, traf_user))
+    run_cmd_as_user(hdfs_user, '%s dfs -setfacl -R -m mask::rwx /hbase/archive' % hdfs_bin)
+
+    # Grant all privileges to the Trafodion principal in HBase
+    if dbcfgs['secure_hadoop'] == 'Y':
+        run_cmd('echo \'grant "%s", "RWXC"\' | sudo -u %s hbase shell > /tmp/hbase_shell.out' % (traf_user, hbase_user))
+        has_err = cmd_output('grep -c ERROR /tmp/hbase_shell.out')
+        if int(has_err):
+            err('Failed to grant HBase privileges to %s' % traf_user)
+        run_cmd('rm /tmp/hbase_shell.out')
+# main
+try:
+    dbcfgs_json = sys.argv[1]
+except IndexError:
+    err('No db config found')
+run()

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/mod_cfgs.json
----------------------------------------------------------------------
diff --git a/install/python-installer/mod_cfgs.json b/install/python-installer/mod_cfgs.json
new file mode 100644
index 0000000..822b470
--- /dev/null
+++ b/install/python-installer/mod_cfgs.json
@@ -0,0 +1,73 @@
+{
+"MOD_CFGS": {
+    "hbase-site": {
+        "hbase.master.distributed.log.splitting": "false",
+        "hbase.snapshot.master.timeoutMillis": "600000",
+        "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation",
+        "hbase.hregion.impl": "org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion",
+        "hbase.regionserver.region.split.policy": "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy",
+        "hbase.snapshot.enabled": "true",
+        "hbase.bulkload.staging.dir": "/hbase-staging",
+        "hbase.regionserver.region.transactional.tlog": "true",
+        "hbase.snapshot.region.timeout": "600000",
+        "hbase.client.scanner.timeout.period": "600000"
+    },
+    "hdfs-site": { "dfs.namenode.acls.enabled": "true" },
+    "zoo.cfg": { "maxClientCnxns": "0" }
+},
+
+"HBASE_MASTER_CONFIG": {
+"roleTypeConfigs" :  [ {
+        "roleType" : "MASTER",
+        "items" : [ {
+                "name" : "hbase_master_config_safety_valve",
+        "value" : "<property>\r\n   <name>hbase.master.distributed.log.splitting</name>\r\n   <value>false</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.master.timeoutMillis</name>\r\n   <value>600000</value>\r\n</property>\r\n"
+                } ]
+    } ]
+},
+
+"HBASE_RS_CONFIG": {
+"items" : [ {
+                "name" : "hbase_coprocessor_region_classes",
+                "value" : "org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation"
+                }, {
+                "name" : "hbase_regionserver_lease_period",
+                "value" : "600000"
+                }, {
+                "name" : "hbase_regionserver_config_safety_valve",
+                "value" : "<property>\r\n   <name>hbase.hregion.impl</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.split.policy</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy</value>\r\n</property>\r\n  <property>\r\n   <name>hbase.snapshot.enabled</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.bulkload.staging.dir</name>\r\n   <value>/hbase-staging</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.transactional.tlog</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.region.timeout</name>\r\n   <value>600000</value>\r\n</property>\r\n "
+                } ]
+},
+
+"HDFS_CONFIG": {
+"roleTypeConfigs" :  [ {
+        "roleType" : "NAMENODE",
+        "items": [ {
+                "name" : "namenode_java_heapsize",
+        "value" : "1073741824"
+                } ]
+   }, {
+        "roleType" : "SECONDARYNAMENODE",
+        "items":[ {
+                "name" : "secondary_namenode_java_heapsize",
+        "value" : "1073741824"
+                } ]
+     } ],
+    "items": [ {
+             "name":"dfs_namenode_acls_enabled",
+             "value":"true"
+             } ]
+},
+
+"ZK_CONFIG":{
+"roleTypeConfigs" :
+    [ {
+        "roleType" : "SERVER",
+        "items":
+           [ {
+           "name"  : "maxClientCnxns",
+           "value" : "0"
+           } ]
+    } ]
+}
+}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/904b53df/install/python-installer/prompt.json
----------------------------------------------------------------------
diff --git a/install/python-installer/prompt.json b/install/python-installer/prompt.json
new file mode 100644
index 0000000..268eca4
--- /dev/null
+++ b/install/python-installer/prompt.json
@@ -0,0 +1,227 @@
+{
+  "traf_start":
+  {
+      "prompt":"Start instance after installation",
+      "default":"Y",
+      "isYN":true
+  },
+  "dcs_ha":
+  {
+      "prompt":"Enable DCS High Avalability",
+      "default":"N",
+      "isYN":true
+  },
+  "dcs_floating_ip":
+  {
+      "prompt":"Enter Floating IP address for DCS HA",
+      "isIP":true
+  },
+  "dcs_interface":
+  {
+      "prompt":"Enter interface for Floating IP address",
+      "default":"eth0"
+  },
+  "dcs_backup_nodes":
+  {
+      "prompt":"Enter DCS Backup Master Nodes for DCS HA (comma separated)"
+  },
+  "kdc_server":
+  {
+      "prompt":"Enter KDC server address"
+  },
+  "admin_principal":
+  {
+      "prompt":"Enter admin principal (include realm)"
+  },
+  "kdcadmin_pwd":
+  {
+      "prompt":"Enter password for admin principal",
+      "ispasswd":true
+  },
+  "hdfs_keytab":
+  {
+      "prompt":"Enter fully qualified name for HDFS keytab"
+  },
+  "hbase_keytab":
+  {
+      "prompt":"Enter fully qualified name for HBase keytab"
+  },
+  "max_lifetime":
+  {
+      "prompt":"Enter max lifetime for Trafodion principal (valid format required)",
+      "default":"24hours"
+  },
+  "max_renew_lifetime":
+  {
+      "prompt":"Enter renew lifetime for Trafodion principal (valid format required)",
+      "default":"7days"
+  },
+  "traf_keytab":
+  {
+      "prompt":"Enter Trafodion keytab name"
+  },
+  "ldap_security":
+  {
+      "prompt":"Enable LDAP security",
+      "default":"N",
+      "isYN":true
+  },
+  "ldap_hosts":
+  {
+      "prompt":"Enter list of LDAP Hostnames (comma separated)"
+  },
+  "ldap_port":
+  {
+      "prompt":"Enter LDAP Port number (Example: 389 for no encryption or TLS, 636 for SSL)",
+      "default":"389",
+      "isdigit":true
+  },
+  "ldap_identifiers":
+  {
+      "prompt":"Enter all LDAP unique identifiers (blank separated)"
+  },
+  "ldap_encrypt":
+  {
+      "prompt":"Enter LDAP Encryption Level (0: Encryption not used, 1: SSL, 2: TLS)",
+      "default":"0",
+      "isdigit":true
+  },
+  "ldap_certpath":
+  {
+      "prompt":"Enter full path to TLS certificate file (*.pem)",
+      "isexist":true
+  },
+  "ldap_userinfo":
+  {
+      "prompt":"If Requred search user name/password",
+      "default":"N",
+      "isYN":true
+  },
+  "ldap_user":
+  {
+      "prompt":"Enter Search user name (if required)",
+      "default":" "
+  },
+  "ldap_pwd":
+  {
+      "prompt":"Enter Search password (if required)",
+      "default":" "
+  },
+  "scratch_locs":
+  {
+      "prompt":"Enter trafodion scratch file folder location(should be a large disk), if more than one folder, use comma seperated",
+      "default":"$MY_SQROOT/tmp"
+  },
+  "local_repo_dir":
+  {
+      "prompt":"Enter local repository folder location to install trafodion RPM dependencies",
+      "isexist":true
+  },
+  "java_home":
+  {
+      "prompt":"Specify location of Java(JDK) on trafodion nodes",
+      "isremote_exist":true,
+      "default":"/usr/lib/jvm/java-1.7.0-openjdk.x86_64"
+  },
+  "dcs_cnt_per_node":
+  {
+      "prompt":"Enter number of DCS client connections per node",
+      "default":"4",
+      "isdigit":true
+  },
+  "first_rsnode":
+  {
+      "prompt":"Enter the hostname of first Apache HBase RegionServer node"
+  },
+  "hdfs_user":
+  {
+      "prompt":"Enter hadoop user name",
+      "default":"hdfs",
+      "isuser":true
+  },
+  "hbase_user":
+  {
+      "prompt":"Enter hbase user name",
+      "default":"hbase",
+      "isuser":true
+  },
+  "hadoop_home":
+  {
+      "prompt":"Enter Apache Hadoop directory location",
+      "isremote_exist":true
+  },
+  "hbase_home":
+  {
+      "prompt":"Enter Apache HBase directory location",
+      "isremote_exist":true
+  },
+  "hive_home":
+  {
+      "prompt":"Enter Apache Hive directory location if exists",
+      "default":"NO_HIVE"
+  },
+  "mgr_url":
+  {
+      "prompt":"Enter HDP/CDH web manager URL:port, (full URL, if no http/https prefix, default prefix is http://)"
+  },
+  "mgr_user":
+  {
+      "prompt":"Enter HDP/CDH web manager user name",
+      "default":"admin",
+      "isuser":true
+  },
+  "mgr_pwd":
+  {
+      "prompt":"Enter HDP/CDH web manager user password",
+      "ispasswd":true
+  },
+  "traf_user":
+  {
+      "prompt":"Enter trafodion user name",
+      "default":"trafodion",
+      "isuser":true
+  },
+  "traf_pwd":
+  {
+      "prompt":"Enter trafodion user password",
+      "ispasswd":true
+  },
+  "traf_package":
+  {
+      "prompt":"Enter full path to Trafodion tar file",
+      "isexist":true
+  },
+  "db_root_user":
+  {
+      "prompt":"Enter LDAP user name to be assigned DB root privileges (DB__ROOT)",
+      "default":"trafodion",
+      "isuser":true
+  },
+  "db_admin_user":
+  {
+      "prompt":"Enter LDAP user name to be assigned DB Admin privileges",
+      "default":"admin",
+      "isuser":true
+  },
+  "db_admin_pwd":
+  {
+      "prompt":"Enter LDAP user password to be assigned DB Admin privileges",
+      "default":"traf123"
+  },
+  "node_list":
+  {
+      "prompt":"Enter list of Nodes separated by comma, support simple numeric RE,\n i.e. \"n[01-12],n[21-25]\",\"n0[1-5].com\""
+  },
+  "cluster_no":
+  {
+      "prompt":"Select the above cluster number for installing Trafodion",
+      "default":"1",
+      "isdigit":true
+  },
+  "use_data_node":
+  {
+      "prompt":"Install Trafodion nodes on all DataNodes",
+      "default":"Y",
+      "isYN":true
+  }
+}