Posted to commits@hawq.apache.org by rl...@apache.org on 2015/10/12 11:19:53 UTC

[4/4] incubator-hawq git commit: HAWQ-39. Remove the following unused mgmt scripts for hawq2.0:

HAWQ-39. Remove the following unused mgmt scripts for hawq2.0:

gpactivatestandby gpinitstandby gpinitsystem gpseginstall lib/gpcreateseg.sh lib/gpinitsegment lib/gpseginitsb.sh


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/8ec87e6a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/8ec87e6a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/8ec87e6a

Branch: refs/heads/master
Commit: 8ec87e6a5be1981f0915a015b0805d770d9dd32b
Parents: b07297a
Author: rlei <rl...@pivotal.io>
Authored: Mon Oct 12 12:36:12 2015 +0800
Committer: rlei <rl...@pivotal.io>
Committed: Mon Oct 12 12:36:58 2015 +0800

----------------------------------------------------------------------
 tools/Makefile                   |    3 -
 tools/bin/gpactivatestandby      |  652 ---------
 tools/bin/gpinitstandby          | 1030 --------------
 tools/bin/gpinitsystem           | 2403 ---------------------------------
 tools/bin/gpseginstall           |  973 -------------
 tools/bin/lib/gpcreateseg.sh     |  331 -----
 tools/bin/lib/gpinitsegment      |  542 --------
 tools/bin/lib/gpseginitsb.sh     |  113 --
 tools/doc/gpactivatestandby_help |  160 ---
 tools/doc/gpinitsystem_help      |  362 -----
 tools/doc/gpseginstall_help      |  157 ---
 11 files changed, 6726 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/Makefile
----------------------------------------------------------------------
diff --git a/tools/Makefile b/tools/Makefile
index 8d87e79..277da1f 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -22,7 +22,6 @@ clean distclean:
 #---------------------------------------------------------------------
 
 SET_VERSION_SCRIPTS = \
-	bin/gpactivatestandby \
 	bin/gpaddmirrors \
 	bin/gpbitmapreindex \
 	bin/gpcheck \
@@ -33,8 +32,6 @@ SET_VERSION_SCRIPTS = \
 	bin/gpexpand \
 	bin/gpextract \
 	bin/gpfilespace \
-	bin/gpinitstandby \
-	bin/gpinitsystem \
 	bin/gpload.py \
 	bin/gplogfilter \
 	bin/gpmigrator \
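
For context, the hunk above drops the deleted utilities from SET_VERSION_SCRIPTS, which presumably is the list of scripts that get a version string stamped into them at build time; stale entries would leave the Makefile pointing at files that no longer exist. A minimal sanity check along those lines (hypothetical, not part of this commit, and assuming the tools/ layout shown in the diff) could parse the variable and confirm each listed script is still present:

    #!/usr/bin/env python
    """Hypothetical check: every SET_VERSION_SCRIPTS entry still exists under tools/."""
    import os
    import sys

    TOOLS_DIR = 'tools'                              # assumed checkout layout
    MAKEFILE = os.path.join(TOOLS_DIR, 'Makefile')

    def parse_set_version_scripts(makefile_path):
        """Collect the backslash-continued entries of the SET_VERSION_SCRIPTS assignment."""
        scripts, collecting = [], False
        with open(makefile_path) as fp:
            for line in fp:
                if line.startswith('SET_VERSION_SCRIPTS'):
                    collecting = line.rstrip().endswith('\\')
                    continue
                if collecting:
                    entry = line.strip().rstrip('\\').strip()
                    if entry:
                        scripts.append(entry)
                    if not line.rstrip().endswith('\\'):   # last continuation line
                        break
        return scripts

    missing = [s for s in parse_set_version_scripts(MAKEFILE)
               if not os.path.exists(os.path.join(TOOLS_DIR, s))]
    if missing:
        sys.exit('stale SET_VERSION_SCRIPTS entries: %s' % ', '.join(missing))
    print('all SET_VERSION_SCRIPTS entries exist')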

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/gpactivatestandby
----------------------------------------------------------------------
diff --git a/tools/bin/gpactivatestandby b/tools/bin/gpactivatestandby
deleted file mode 100755
index db323e2..0000000
--- a/tools/bin/gpactivatestandby
+++ /dev/null
@@ -1,652 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Line too long - pylint: disable=C0301
-# Invalid name  - pylint: disable=C0103
-#
-# Copyright (c) EMC 2011
-# Copyright (c) Greenplum Inc 2010
-# All Rights Reserved. 
-#
-"""
-Activates a standby master instance when the primary master instance has 
-failed.  Will stop the gpsyncmaster process, update the system catalog 
-tables and start the instance with the standby master instance as the 
-new master.
-"""
-
-import os
-import sys
-import signal
-import glob
-import time
-import shutil
-from datetime import datetime, timedelta
-
-# import GPDB modules
-try:
-    from gppylib.commands import unix, gp, pg
-    from gppylib.commands.gp import GpAddConfigScript
-    from gppylib.db import dbconn
-    from gppylib.gpparseopts import OptParser, OptChecker, OptionGroup, SUPPRESS_HELP
-    from gppylib.gplog import get_default_logger, setup_tool_logging, enable_verbose_logging, get_logger_if_verbose
-    from gppylib import gparray
-    from gppylib.userinput import ask_yesno
-    from gppylib.gp_dbid import writeGpDbidFile
-    from gppylib.operations.filespace import PG_SYSTEM_FILESPACE, GP_TRANSACTION_FILES_FILESPACE, GP_TEMPORARY_FILES_FILESPACE, GetFilespaceEntries, GetFilespaceEntriesDict
-    from gppylib.commands.base import WorkerPool
-except ImportError, e_:
-    sys.exit('ERROR: Cannot import modules.  Please check that you '
-             'have sourced greenplum_path.sh.  Detail: ' + str(e_))
-
-EXECNAME = os.path.split(__file__)[-1]
-
-# Threshold values
-LOG_TIME_THRESHOLD_MINS = 120
-
-
-_description = sys.modules[__name__].__doc__
-_usage = "\n"
-
-
-class GpActivateStandbyException(Exception):
-    "Generic exception for all things activatestandby"
-    pass
-
-
-#-------------------------------------------------------------------------
-def parseargs():
-    """Parses and validates command line args."""
-    
-    parser = OptParser(option_class=OptChecker,
-                       description=' '.join(_description.split()),
-                       version='%prog version $Revision$')
-    parser.setHelp([])
-    parser.remove_option('-h')
-    
-    # General options section
-    optgrp = OptionGroup(parser, 'General options')
-    optgrp.add_option('-h', '-?', '--help', dest='help', action='store_true',
-                      help='display this help message and exit')
-    optgrp.add_option('-v', dest='version', action='store_true',
-                      help='display version information and exit')
-    parser.add_option_group(optgrp)
-
-    # Logging options section
-    optgrp = OptionGroup(parser, 'Logging options')
-    optgrp.add_option('-q', '--quiet', action='store_true',
-                      help='quiet mode, do not log progress to screen')
-    optgrp.add_option('-l', '--logfile', type='string', default=None,
-                      help='alternative logfile directory')
-    optgrp.add_option('-a', help='don\'t ask to confirm standby master activation',
-                      dest='confirm', default=True, action='store_false')
-    parser.add_option_group(optgrp)
-
-    # Standby activation options section
-    optgrp = OptionGroup(parser, 'Standby activation options')
-    optgrp.add_option('-d', '--master-data-directory', dest='master_data_dir',
-                      type='string', help='standby master data directory')
-    optgrp.add_option('-f', '--force', action='store_true',
-                      help='force activation if gpsyncmaster process not running')
-    optgrp.add_option('-c', '--create-new-standby', type='string', dest='new_standby',
-                      help='create a new standby master instance following successful '
-                      'activation of the standby master instance', metavar='HOSTNAME')
-    optgrp.add_option('--ignore', dest='ignore', type='string', help=SUPPRESS_HELP)
-    parser.add_option_group(optgrp)
-    
-    parser.set_defaults(quiet=False, force=False)
-
-    # Parse the command line arguments
-    (options, args) = parser.parse_args()
-
-    if options.help:
-        parser.print_help()
-        parser.exit(0, None)
-
-    if options.version:
-        parser.print_version()
-        parser.exit(0, None)
-
-    # check we got the -d option
-    if not options.master_data_dir:
-        logger.fatal('Required option -d is missing.')
-        parser.exit(2, None)
-        
-    # We have to normalize this path for a later comparison
-    options.master_data_dir = os.path.abspath(os.path.normpath(options.master_data_dir))
-
-    # check that there isn't a conflict between -d option and MASTER_DATA_DIRECTORY env
-    env_master_data_dir = os.getenv('MASTER_DATA_DIRECTORY', None)
-    if not env_master_data_dir:
-        logger.fatal('MASTER_DATA_DIRECTORY environment variable not set.')
-        parser.exit(2, None)
-
-    if os.path.normpath(env_master_data_dir) != options.master_data_dir:
-        logger.fatal('Current setting of MASTER_DATA_DIRECTORY not same as -d parameter.')
-        parser.exit(2, None)
-
-    # check we aren't trying to create a standby master on this host
-    if options.new_standby and unix.getLocalHostname() == options.new_standby:
-        logger.fatal('New standby hostname supplied is same as current hostname.')
-        parser.exit(2, None)
-
-    # check new standby host is up
-    if options.new_standby:
-        cmd = unix.Ping('Check new standby host up', options.new_standby)
-        cmd.run()
-        if cmd.get_results().rc != 0:
-            logger.fatal('New standby host %s did not respond to ping request.' % options.new_standby)
-            parser.exit(2, None)
-
-        # Check master data directory does not exist already on new standby
-        if unix.FileDirExists.remote('Check new standby', options.new_standby, options.master_data_dir):
-            logger.fatal('%s already exists on proposed new standby master %s' % \
-                         (options.master_data_dir, options.new_standby))
-            parser.exit(2, None)
-
-    if options.logfile and not os.path.exists(options.logfile):
-        logger.fatal('Log directory %s does not exist.' % options.logfile)
-        parser.exit(2, None)
-
-    # The default logging for gpactivatestandby is verbose
-    if not options.quiet:
-        enable_verbose_logging()
-
-    # There shouldn't be any args
-    if len(args) > 0:
-        logger.error('Unknown arguments:')
-        for arg in args:
-            logger.error('  %s' % arg)
-        parser.exit(2, None)
-        
-    return options, args
-
-
-#-------------------------------------------------------------------------
-def print_results(array, hostname, options):
-    """Prints out the summary of the operation."""
-    
-    logger.info('-----------------------------------------------------')
-    logger.info('The activation of the standby master has completed successfully.')
-    logger.info('%s is now the new primary master.' % hostname)
-    logger.info('You will need to update your user access mechanism to reflect')
-    logger.info('the change of master hostname.')
-    logger.info('Do not re-start the failed master while the fail-over master is')
-    logger.info('operational; this could result in database corruption!')
-    logger.info('MASTER_DATA_DIRECTORY is now %s. If' % options.master_data_dir)
-    logger.info('this has changed as a result of the standby master activation, remember')
-    logger.info('to change this in any startup scripts, etc., that may be configured')
-    logger.info('to set this value.')
-    if not options.new_standby:
-        logger.info('New standby master not initialized!')
-    else:
-        logger.info('New standby master instance %s' % options.new_standby)
-    logger.info('MASTER_PORT is now %d. If this has changed, you' % array.standbyMaster.getSegmentPort())
-    logger.info('may need to make additional configuration changes to allow access')
-    logger.info('to the Greenplum instance.')
-    logger.info('Refer to the Administrator Guide for instructions on how to re-activate')
-    logger.info('the master to its previous state once it becomes available.')
-    logger.info('Query planner statistics must be updated on all databases')
-    logger.info('following standby master activation.')
-    logger.info('When convenient, run ANALYZE against all user databases.')
-    logger.info('-----------------------------------------------------')
-
-
-#-------------------------------------------------------------------------
-def print_summary(options, warnings_errors, last_entry_time):
-    # Too many statements - pylint: disable=R0915
-
-    """Print summary of the action and asks user if they want to
-    continue with the activation."""
-    
-    last_entry_ago = None
-    require_force = False
-    warnings_generated = False
-    
-    # calculate the timedelta of last log message
-    if last_entry_time:
-        last_entry_ago = datetime.now() - datetime.strptime(last_entry_time, '%Y-%m-%d %H:%M:%S.%f %Z')
-        
-    # check the syncmaster
-    gpsyncmaster_running = check_gpsync_running(options)
-    if not gpsyncmaster_running:
-        require_force = True
-        
-    logger.info('-----------------------------------------------------')
-    logger.info('Master data directory     = %s' % options.master_data_dir)
-    if options.logfile:
-        logger.info('Log directory             = %s' % options.logfile)
-    logger.info('gpsyncmaster running      = %s' % ('yes' if gpsyncmaster_running else 'no'))
-    logger.info('Last log entry time       = %s' % last_entry_time)
-    if last_entry_ago:
-        logger.info('                            %s ago' % last_entry_ago)
-    logger.info('Create new standby master = %s' % ('yes' if options.new_standby else 'no'))
-    if options.new_standby:
-        logger.info('New standby master host   = %s' % options.new_standby)
-    logger.info('Force standby activation  = %s' % ('yes' if options.force else 'no'))
-    logger.info('-----------------------------------------------------')
-    if last_entry_ago > timedelta(minutes=LOG_TIME_THRESHOLD_MINS):
-        logger.warning('The last log entry timestamp was over %d minutes ago.' % LOG_TIME_THRESHOLD_MINS)
-        logger.warning('This indicates that the standby master is likely out of date.')
-        require_force = True
-        warnings_generated = True
-    if len(warnings_errors) > 0:
-        logger.warning('The following warnings/errors were found in the most recent log file:')
-        for log_msg in warnings_errors:
-            logger.warning('  %s' % log_msg)
-
-        logger.warning('Greenplum has detected errors and/or warnings in your standby')
-        logger.warning('master log file that indicate a problem with the synchronization process')
-        logger.warning('between your primary and standby master hosts. Before activating your')
-        logger.warning('standby master, it is critical to ensure that it is up to date with all')
-        logger.warning('of the transactions currently committed to Greenplum Database. If you')
-        logger.warning('activate a standby master that is not in sync with the transactional')
-        logger.warning('state of the segments, you may introduce catalog and data')
-        logger.warning('inconsistencies that will render your Greenplum Database instance')
-        logger.warning('unusable. If your primary master is no longer available and you suspect')
-        logger.warning('that you do not have an up-to-date standby master, contact Greenplum')
-        logger.warning('Customer Support for further assistance.')
-        logger.warning('It is also recommended that you make a backup of the standby master')
-        logger.warning('data directory (%s) before continuing.' % options.master_data_dir)
-        
-        require_force = True
-        warnings_generated = True
-    # Check if we require a force
-    if require_force and not options.force:
-        logger.warning('If you wish to continue you must use the -f option to force')
-        logger.warning('the activation process.')
-        warnings_generated = True
-        raise GpActivateStandbyException('Force activation required')
-    if options.confirm:
-        yn = ask_yesno(None, 'Do you want to continue with standby master activation?', 'N')
-        if not yn:
-            raise GpActivateStandbyException('User canceled') 
-
-    return warnings_generated
-
-
-#-------------------------------------------------------------------------
-def get_most_recent_log(options):
-    """
-    Returns the file name of the most recent log file.
-    """
-    file_pattern = options.master_data_dir + '/pg_log/gpdb*.csv'
-    file_pair    = lambda f: (time.localtime(os.stat(f)[8]), f)
-    file_list    = sorted([file_pair(f) for f in glob.glob(file_pattern)])
-    if len(file_list) == 0:
-        return None
-    return file_list[-1][1]
-
-
-#-------------------------------------------------------------------------
-def setup_ignore_filter(options):
-    """
-    Returns ignore_filter function found in python file specified by options.ignore.
-    """
-    if not options.ignore:
-        return None
-
-    # load the filter from the file, giving it access to the logger
-    #
-    gdict = { 'logger':logger }
-    try:
-        execfile(options.ignore, gdict)
-    except (OSError, IOError), e:
-        logger.info('Could not read ignore_filter file: %s' % str(e))
-
-    return gdict.get('ignore_filter')
-
-
-#-------------------------------------------------------------------------
-def check_recent_log(options):
-    """
-    Checks the most recent GPDB log file for warnings and errors related to standby master.
-    Returns a tuple containing the warnings and the time of the last log entry.
-    """
-    warnings_errors = []
-    recent_log      = get_most_recent_log(options)
-    ignore_filter   = setup_ignore_filter(options)
-    ignore_count    = 0
-    how             = "Filtering" if ignore_filter is not None else "Examining"
-
-    logger.info('%s log file %s for warnings and errors...' % (how, recent_log))
-
-    lines = gp.GpLogFilter.local('log search', recent_log, trouble=True)
-    for line in lines:
-
-        # filter out lines matched by the ignore_filter
-        if ignore_filter is not None and ignore_filter(line):
-            ignore_count += 1
-            continue
-
-        # collect errors
-        try:
-            warnings_errors.append(line.split('|')[18])
-        except Exception, e:
-            # badly formatted log entry or empty line??
-            logger.info(str(e))
-            logger.info(line)
-        
-    # report lines ignored
-    if ignore_filter is not None and ignore_count > 0:
-        logger.info("Note: %d line(s) ignored by %s's ignore_filter" % (ignore_count, options.ignore))
-
-    # get date from last line in log file
-    last_entry_time = None
-    last_line = gp.GpLogFilter.local('last log msg', recent_log, count=1)
-    if last_line:
-        last_entry_time = last_line[0].split('|')[0]
-
-    return (warnings_errors, last_entry_time)
-
-
-#-------------------------------------------------------------------------
-def check_standby_master_activation(options):
-    """Runs general sanity checks on the standby master looking for any
-    obvious situations that would cause problems."""
-    
-    # Parse the most recent log file looking for sync errors and warnings.
-    (warnings_errors, last_entry_time) = check_recent_log(options)
-    return (warnings_errors, last_entry_time)
-
-
-#-------------------------------------------------------------------------
-def get_config():
-    """Retrieves configuration information from the catalog."""
-    
-    logger.info('Reading current configuration...')
-    dburl = dbconn.DbURL()
-    array = gparray.GpArray.initFromCatalog(dburl, utility=True)
-    
-    master_hostname = array.master.getSegmentHostName()
-    master_port = array.master.getSegmentPort()
-    
-    cmd = pg.ReadPostmasterTempFile.remote('Read postmaster file', master_port, master_hostname)
-    (file_exists, _, _) = cmd.getResults()
-    if file_exists:
-        logger.warn('Appears that there is an active postgres process on %s port=%d' % (master_hostname, master_port))
-        logger.info('This may have been caused by a kill -9 of the master postgres process.')
-        logger.info('Traces of this process will need to be removed, please follow the instructions below.')
-        logger.info('1. Delete the /tmp/.s.PGSQL.%d and /tmp/.s.PGSQL.%d.* files on %s' % (master_port, master_port, master_hostname))
-        logger.info('2. Remove the %s/postmaster.pid file on %s' % (array.master.getSegmentDataDirectory(), master_hostname))
-        logger.info('3. Then call this utility again.')
-        stop_master()
-        raise GpActivateStandbyException('Active postgres process on master')
-    
-    return array
-
-#-------------------------------------------------------------------------
-def update_flat_file(array, flat_file):
-    """
-        If the transaction/temporary filespaces have
-        ever been moved, we need to update the flat file.
-        The filespace directories are copied by the 
-        copy_master_filespaces method.
-    """
-
-    logger.info('Updating filespace flat files')    
-
-    pg_system_fs_entries = GetFilespaceEntriesDict(GetFilespaceEntries(array, PG_SYSTEM_FILESPACE).run()).run() 
-   
-    flat_file_location = os.path.join(pg_system_fs_entries[1][2], flat_file) 
-        
-    if not os.path.exists(flat_file_location):
-        return
-
-    logger.debug('flat file location for transaction files = %s' % flat_file_location)
-    #Copy over the updated flat file to the standby
-    with open(flat_file_location) as read_file:
-        lines_to_write = ''
-        for line in read_file:
-            tokens = line.split()
-            if len(tokens) != 2:
-                lines_to_write += line
-            elif tokens[0] == '1':
-                lines_to_write += line
-
-    temp_flat_file = os.path.join(flat_file_location + '.tmp')
-    
-    try:
-        with open(temp_flat_file, 'w') as write_file:
-            write_file.write(lines_to_write)
-     
-        #Rewrite the master flat file to include the standby information 
-        shutil.move(temp_flat_file, flat_file_location)
-    except Exception, e:
-        raise Exception('Failed to update flat file')
-
-def set_repair_global_sequence(datadir):
-    """Set gp_persistent_repair_global_sequence for the new standby master."""
-
-    pool = WorkerPool()
-    cmd = GpAddConfigScript(unix.getLocalHostname(), datadir, 'gp_persistent_repair_global_sequence', 'true', False)
-    pool.addCommand(cmd)
-    try:
-        pool.join()
-        items = pool.getCompletedItems()
-        failure = False
-        for i in items:
-            if not i.was_successful():
-                logger.error('failed updating the postgresql.conf files on host: ' + i.remoteHost)
-                failure = True
-
-        pool.check_results()
-    except Exception, e:
-        logger.error('errors in job:')
-        logger.error(e.__str__())
-        logger.error('exiting early')
-
-    pool.haltWork()
-    pool.joinWorkers()
-
-
-def reset_repair_global_sequence(datadir):
-    """Reset gp_persistent_repair_global_sequence after the standby has been activated."""
-    pool = WorkerPool()
-    cmd = GpAddConfigScript(unix.getLocalHostname(), datadir, 'gp_persistent_repair_global_sequence', None, True)
-    pool.addCommand(cmd)
-    try:
-        pool.join()
-        items = pool.getCompletedItems()
-        failure = False
-        for i in items:
-            if not i.was_successful():
-                logger.error('failed updating the postgresql.conf files on host: ' + i.remoteHost)
-                failure = True
-
-        pool.check_results()
-    except Exception, e:
-        logger.error('errors in job:')
-        logger.error(e.__str__())
-        logger.error('exiting early')
-
-    pool.haltWork()
-    pool.joinWorkers()
-
-
-#-------------------------------------------------------------------------
-def update_config():
-    """Updates the configuration information in the catalog."""
-    
-    dburl = dbconn.DbURL()
-    conn = dbconn.connect(dburl, utility=True)
-    
-    logger.info('Updating catalog...')
-    sql = "SELECT gp_activate_standby()"
-    dbconn.execSQL(conn, sql)
-
-    conn.commit()
-    conn.close()
-
-    logger.info('Database catalog updated successfully')
-
-#-------------------------------------------------------------------------
-def update_gpdbid_file(array):
-    """Updates the gp_dbid file in the data directory to reflect the standby master's dbid."""
-    
-    standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())
-
-    # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace
-    writeGpDbidFile(standby_datadir, 1, get_logger_if_verbose())
-
-
-#-------------------------------------------------------------------------
-def create_new_standby_master(options):
-    """Creates a new standby master."""
-    
-    logger.info('Creating new standby master...')
-
-    gphome = os.environ.get("GPHOME")
-    # we have to use os.system here because gpinitstandby will be interactive
-    # due to filespace remapping.
-    rc = os.system("%s/bin/gpinitstandby -s %s" % (gphome, options.new_standby))
-    if rc != 0:
-        logger.warning('Failed to create the new standby master on %s' % options.new_standby)
-        logger.warning('You will need to manually run \'gpinitstandby -s %s\' to create the new standby master.' % options.new_standby)
-        raise GpActivateStandbyException('Failed to create new standby')
-
-
-#-------------------------------------------------------------------------
-def check_gpsync_running(options):
-    """Checks if the gpsyncmaster process is running."""
-    
-    return gp.getSyncmasterPID('localhost', options.master_data_dir) > 0
-
-
-#-------------------------------------------------------------------------
-def stop_gpsync_process(options):
-    """Stops the gpsyncmaster process."""
-    
-    logger.info('Stopping gpsync process...')
-
-    # check to see if the gpsyncmaster process is active
-    pid = gp.getSyncmasterPID('localhost', options.master_data_dir)
-    
-    if not pid > 0:
-        # gpsyncmaster is not running so check if the force option was given.
-        if options.force:
-            # DbStatus only uses data directory so we can ignore the other values
-            db = gparray.GpDB(None, None, None, None, None, None, None, None, None, options.master_data_dir, None)
-            # check that postmaster isn't already running
-            if pg.DbStatus.local('check db status', db) == True:
-                logger.error('Located a postgres process on this host')
-                logger.error('Has the master standby instance already been activated?')
-                logger.error('Run the gpstate utility to check.')
-                logger.error('Possible standby master instance active')
-                raise GpActivateStandbyException('postgres process already running')
-    else:
-        gp.SegmentStop.local('stopping gpsyncmaster', options.master_data_dir, mode='fast')
-        if unix.check_pid(pid):
-            # not able to stop normally so give it a go with immediate mode
-            logger.warning('Process gpsyncmaster still running, will issue fast shutdown with immediate')
-            gp.SegmentStop.local('stopping gpsyncmaster', options.master_data_dir, mode='immediate')
-                            
-            if unix.check_pid(pid):
-                logger.error('Unable to stop sync process')
-                raise GpActivateStandbyException('Unable to stop sync process')
-            else:
-                logger.info('Successfully shutdown sync process.')
-        else:
-            logger.info('Successfully shutdown sync process')
-
-
-#-------------------------------------------------------------------------
-def start_master():
-    """Starts the master."""
-    
-    logger.info('Starting standby master database in utility mode...')
-    gp.GpStart.local('Start GPDB', masterOnly=True)
-
-
-#-------------------------------------------------------------------------
-def stop_master():
-    """Stops the master."""
-    
-    logger.info('Stopping standby master...')
-    gp.GpStop.local('Stop GPDB', masterOnly=True, fast=True)
-    
-
-#-------------------------------------------------------------------------
-def start_database():
-    """Starts the database."""
-    
-    logger.info('Starting database in production mode...')
-    gp.GpStart.local('Start database in production mode')
-
-#-------------------------------------------------------------------------
-def stop_database():
-    """Stops the database."""
-    
-    logger.info('Stopping database...')
-    gp.GpStop.local('Stopping database')
-
-
-
-#-------------------------------------------------------------------------
-# Main
-#-------------------------------------------------------------------------
-
-# setup logging
-logger = get_default_logger()
-setup_tool_logging(EXECNAME, unix.getLocalHostname(), unix.getUserName())
-
-# parse args and options
-(options_, args_) = parseargs()
-
-# if we got a new log dir, we can now set it up.
-if options_.logfile:
-    setup_tool_logging(EXECNAME, unix.getLocalHostname(), unix.getUserName(), logdir=options_.logfile)
-
-try:
-    (warnings_errors_, last_entry_time_) = check_standby_master_activation(options_)
-
-    warnings_generated_ = print_summary(options_, warnings_errors_, last_entry_time_)
-
-    # disable keyboard interrupt to prevent users from canceling
-    # out of the process at a very bad time.  If there is a partial
-    # update to the gp_configuration catalog and the user cancels
-    # you get stuck where you can't go forward and you can't go
-    # backwards.
-    signal.signal(signal.SIGINT, signal.SIG_IGN)
-    stop_gpsync_process(options_)
-    # Prevent the global_sequence regression happened.
-    set_repair_global_sequence(options_.master_data_dir)
-    start_master()
-    array_ = get_config()
-    reset_repair_global_sequence(options_.master_data_dir)
-    update_config()
-    update_gpdbid_file(array_)
-    update_flat_file(array_, GP_TRANSACTION_FILES_FILESPACE)
-    update_flat_file(array_, GP_TEMPORARY_FILES_FILESPACE)
-
-    # This should be stop_master, but due to filerep issue (MPP-9559)
-    # we need to stop all the segments too.
-    # Catch the exception that can be thrown if some segments are already down.
-    try:
-        stop_database()
-    except Exception, e_:
-        logger.info('Exception observed while stopping database')
-        logger.info(str(e_))
- 
-    start_database()
-    
-    # At this point, cancel isn't all that bad so re-enable 
-    # keyboard interrupt.  They may cancel out of the creating
-    # of a new standby, but gpinitstandby can be used to 
-    # create one at a later point.
-    signal.signal(signal.SIGINT, signal.default_int_handler)
-    
-    if options_.new_standby:
-        create_new_standby_master(options_)
-        
-    print_results(array_, unix.getLocalHostname(), options_)
-    
-    if warnings_generated_:
-        sys.exit(1)
-    else:
-        sys.exit(0)
-    
-except Exception, e_:
-    logger.fatal('Error activating standby master: %s' % str(e_))
-    sys.exit(2)
-
-
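
One easily missed piece of the removed gpactivatestandby above is its hidden --ignore option: setup_ignore_filter() execfile()s a user-supplied Python file (with logger pre-bound in its globals) and pulls out an ignore_filter(line) callable, and check_recent_log() then skips any "trouble" log line for which that callable returns true. A hypothetical filter file for that hook, with placeholder log fragments of my own choosing, might have looked like this:

    # ignore_known_noise.py
    # Passed to the removed utility as something like:
    #   gpactivatestandby -d $MASTER_DATA_DIRECTORY --ignore ignore_known_noise.py
    # gpactivatestandby execfile()'d this file and then called ignore_filter(line)
    # for every warning/error line in the newest pg_log csv file; returning True
    # told check_recent_log() not to count the line as a blocking problem.

    HARMLESS_FRAGMENTS = (
        # placeholder patterns; a real filter would match known-benign sync messages
        'terminating walreceiver process',
        'could not connect to the primary server',
    )

    def ignore_filter(line):
        """Return True for log lines that should not block standby activation."""
        return any(fragment in line for fragment in HARMLESS_FRAGMENTS)

Filtered lines are not silently dropped: check_recent_log() still reports how many lines the ignore_filter skipped.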

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/gpinitstandby
----------------------------------------------------------------------
diff --git a/tools/bin/gpinitstandby b/tools/bin/gpinitstandby
deleted file mode 100755
index decd7a6..0000000
--- a/tools/bin/gpinitstandby
+++ /dev/null
@@ -1,1030 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) Greenplum Inc 2010. All Rights Reserved. 
-#
-import os
-import sys
-import signal
-import shutil
-
-# import GPDB modules
-try:
-    from gppylib.gpparseopts import *
-    from gppylib.gplog import *
-    from gppylib.commands import unix, gp, base
-    from gppylib import gparray
-    from gppylib.db import dbconn
-    from gppylib.db import catalog
-    from gppylib.userinput import *
-    from gppylib.gp_dbid import GpDbidFile
-    from gppylib.operations.package import SyncPackages
-    from gppylib.operations.filespace import PG_SYSTEM_FILESPACE, GP_TRANSACTION_FILES_FILESPACE, GP_TEMPORARY_FILES_FILESPACE, GetFilespaceEntries, GetFilespaceEntriesDict, MoveFilespaceError, create_temporary_directories, remove_temporary_directories
-except ImportError, e:
-    sys.exit('ERROR: Cannot import modules.  Please check that you '
-             'have sourced greenplum_path.sh.  Detail: ' + str(e))
-
-EXECNAME = os.path.split(__file__)[-1]
-
-# initstandby state constants for rollback
-INIT_STANDBY_STATE_NOT_STARTED=0
-INIT_STANDBY_STATE_UPDATE_CATALOG=1
-INIT_STANDBY_STATE_COPY_FILES=2
-INIT_STANDBY_STATE_UPDATE_GPDBID=3
-INIT_STANDBY_STATE_DONE=4
-
-g_init_standby_state=INIT_STANDBY_STATE_NOT_STARTED
-
-
-# default batch size
-DEFAULT_BATCH_SIZE=16
-
-# backup filename
-PG_HBA_BACKUP = 'pg_hba.conf.gpinitstandby.bak'
-
-_description = """The gpinitstandby utility adds a backup master host to your
-Greenplum Database system. If your system has an existing backup
-master host configured, use the -r option to remove it before adding 
-the new standby master host.
-
-Before running this utility, make sure 
-that the Greenplum Database software is installed on the backup master 
-host and that you have exchanged SSH keys between hosts. Also make sure 
-that the master port is set to the same port number on the master host 
-and the backup master host. This utility should be run on the currently 
-active primary master host.
-
-The utility will perform the following steps:
-* Shutdown your Greenplum Database system
-* Update the Greenplum Database system catalog to remove the 
-  existing backup master host information (if the -r option is supplied)
-* Update the Greenplum Database system catalog to add the new backup 
-  master host information (use the -n option to skip this step)
-* Edit the pg_hba.conf files of the segment instances to allow access 
-  from the newly added standby master.
-* Setup the backup master instance on the alternate master host
-* Start the synchronization process
-* Restart your Greenplum Database system
-
-A backup master host serves as a 'warm standby' in the event of the 
-primary master host becoming nonoperational. The backup master is kept 
-up to date by a transaction log replication process (gpsyncmaster), 
-which runs on the backup master host and keeps the data between the 
-primary and backup master hosts synchronized. If the primary master 
-fails, the log replication process is shutdown, and the backup master 
-can be activated in its place by using the gpactivatestandby utility. 
-Upon activation of the backup master, the replicated logs are used to 
-reconstruct the state of the master host at the time of the last 
-successfully committed transaction.
-"""
-
-_usage = """
-"""
-
-class GpInitStandbyException(Exception):
-    pass
-
-
-#-------------------------------------------------------------------------
-def parseargs():
-    """parses and validates command line args."""
-    
-    parser = OptParser(option_class=OptChecker,
-                       version='%prog version $Revision$')
-
-    parser.setHelp([])
-    parser.remove_option('-h')
-    
-    # General options section
-    optgrp = OptionGroup(parser, 'General options')
-    optgrp.add_option('-?', '--help', dest='help', action='store_true',
-                      help='display this help message and exit')
-    optgrp.add_option('-v', dest='version', action='store_true',
-                      help='display version information and exit')
-    parser.add_option_group(optgrp)
-
-    # Logging options section
-    optgrp = OptionGroup(parser, 'Logging options')
-    optgrp.add_option('-q', '--quiet', action='store_true',
-                      help='quiet mode, do not log progress to screen')
-    optgrp.add_option('-l', '--logfile', type='string', default=None,
-                      help='alternative logfile directory')
-    optgrp.add_option('-a', help='don\'t ask to confirm standby master activation',
-                      dest='confirm', default=True, action='store_false')
-    optgrp.add_option('-D', '--debug', action='store_true', default=False,
-                      help='enable debug logging')
-    parser.add_option_group(optgrp)
-
-    # Standby initialization options section
-    optgrp = OptionGroup(parser, 'Standby initialization options')
-    optgrp.add_option('-s', '--standby-host', type='string', dest='standby_host',
-                      help='hostname of system to create standby master on')
-    optgrp.add_option('-n', '--no-update', action='store_true', dest='no_update',
-                      help='do not update system catalog tables')
-    optgrp.add_option('-r', '--remove', action='store_true',
-                      help='remove current warm master standby.  Use this option '
-                      'if the warm master standby host has failed.  This option will '
-                      'need to shutdown the GPDB array to be able to complete the request')
-    optgrp.add_option('-M', '--mode', type='string', default='smart',
-                      help='use specified mode when stopping the GPDB array.  Default: smart')
-    optgrp.add_option('-L', '--no-restart', dest='no_restart', default=False, action='store_true',
-                      help='leave the GPDB array in a stopped state after removing the warm standby master')
-    parser.add_option_group(optgrp)
-
-    
-    # Parse the command line arguments
-    (options, args) = parser.parse_args()
-
-    if options.help:
-        parser.print_help()
-        parser.exit(0, None)
-
-    if options.version:
-        parser.print_version()
-        parser.exit(0, None)
-
-    if options.logfile and not os.path.exists(options.logfile):
-        logger.error('Log directory %s does not exist.' % options.logfile)
-        parser.exit(2, None)
-
-    # -s and -n are exclusive
-    if options.standby_host and options.no_update:
-        logger.error('Options -s and -n cannot be specified together.')
-        parser.exit(2, None)
-
-    # -s and -r are exclusive
-    if options.standby_host and options.remove:
-        logger.error('Options -s and -r cannot be specified together.')
-        parser.exit(2, None)
-	
-    # -L and -s are exclusive
-    if options.standby_host and options.no_restart:
-        logger.error('Options -s and -L cannot be specified together.')
-        parser.exit(2, None)
-
-    # we either need to delete or create or sync
-    if not options.remove and not options.standby_host and not options.no_update:
-        logger.error('No action provided in the options.')
-        parser.print_help()
-        parser.exit(2, None)
-
-    # check that new standby host is up
-    if options.standby_host:
-        try:
-            gp.Ping.local('check new standby up', options.standby_host)
-        except:
-            logger.error('Unable to ping new standby host %s' % options.standby_host)
-            parser.exit(2, None)
-
-    # make sure we aren't trying to create a standby on this host
-    if options.standby_host and options.standby_host == unix.getLocalHostname():
-        logger.error('Cannot run this script on the standby master host')
-        parser.exit(2, None)
-
-    return options, args
-   
-   
-#-------------------------------------------------------------------------
-def print_summary(options, array, standby_filespace_map):
-    """Display summary of gpinitstandby operations."""
-    
-    logger.info('-----------------------------------------------------')
-    if options.remove:
-        logger.info('Warm master standby removal parameters')
-    else:
-        logger.info('Greenplum standby master initialization parameters')
-    logger.info('-----------------------------------------------------')
-    logger.info('Greenplum master hostname               = %s' \
-                    % array.master.getSegmentHostName())
-    logger.info('Greenplum master data directory         = %s' \
-                    % array.master.getSegmentDataDirectory())
-    logger.info('Greenplum master port                   = %s' \
-                    % array.master.getSegmentPort())
-    if options.remove:
-        logger.info('Greenplum standby master hostname       = %s' \
-                        % array.standbyMaster.getSegmentHostName())
-    else:
-        logger.info('Greenplum standby master hostname       = %s' \
-                        % options.standby_host)
-    logger.info('Greenplum standby master port           = %s' \
-                    % array.master.getSegmentPort())
-    if not array.standbyMaster:
-        pg_system = None
-        for fs in standby_filespace_map:
-            if fs[0] == 'pg_system':
-                pg_system = fs[1]
-                break
-        if pg_system:
-            logger.info('Greenplum standby master data directory = %s' % pg_system)
-        else:
-            raise GpInitStandbyException('Failed to find pg_system '
-                                         'filespace for standby master')
-    else:
-        logger.info('Greenplum standby master data directory = %s' \
-                        % array.standbyMaster.getSegmentDataDirectory())
-    if not options.remove and options.no_update:
-        logger.info('Greenplum update system catalog         = Off')
-    elif not options.remove:
-        logger.info('Greenplum update system catalog         = On')
-    logger.info('Greenplum stop database mode            = %s' % options.mode)
-    if options.remove and options.no_restart:
-        logger.info('Restart Greenplum database after delete = No')
-    elif options.remove:
-        logger.info('Restart Greenplum database after delete = Yes')
-        
-    if not options.remove and standby_filespace_map:
-        logger.info('-----------------------------------------------------')
-        logger.info(' Filespace locations')
-        logger.info('-----------------------------------------------------')
-        for item in standby_filespace_map:
-            logger.info('%s -> %s' % (item[0], item[1]))
-
-    # Confirm the action
-    if options.confirm:
-        if options.remove:
-            yn = ask_yesno(None, 'Do you want to continue with deleting '
-                           'the standby master?', 'N')
-        else:
-            yn = ask_yesno(None, 'Do you want to continue with '
-                           'standby master initialization?', 'N')
-        if not yn:
-            raise GpInitStandbyException('User canceled')
-
-
-#-------------------------------------------------------------------------
-def stop_database(options):
-    """Stops the database."""
-    
-    try:
-        logger.info('Stopping database...')
-        if options.mode == 'fast':
-            gp.GpStop.local('Stop GPDB', fast=True)
-        else:
-            gp.GpStop.local('Stop GPDB')
-    except Exception, ex:
-        logger.error('Failed to stop the database.')
-        raise GpInitStandbyException(ex)
-
-def getDbUrlForInitStandby():
-    """
-    Return the dbconn.DbURL instance that should be used for connecting
-    """
-
-    #
-    # use template0 to avoid using PGDATABASE value (which definitely won't work during initsystem)
-    #
-    return dbconn.DbURL(dbname="template0")
-
-#-------------------------------------------------------------------------
-def start_database():
-    """Starts the database."""
-    
-    logger.info('Starting database in production mode...')
-    try:
-        gp.GpStart.local('Start database in production mode')
-    except Exception, ex:
-        logger.error('Failed to start the database')
-        raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def stop_master():
-    """Stops the master only."""
-    
-    logger.info('Stopping master...')
-    try:
-        gp.GpStop.local('Stop GPDB', masterOnly=True, fast=True)
-    except Exception, ex:
-        logger.error('Failed to stop the master.')
-        raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def start_master():
-    """Starts the master in utility mode."""
-    
-    logger.info('Starting master in utility mode...')
-    try:
-        gp.GpStart.local('Start GPDB', masterOnly=True)
-    except Exception, ex:
-        logger.error('Failed to start the master.')
-        raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def delete_standby(options):
-    """Removes the standby master."""
-    try:
-        dburl = getDbUrlForInitStandby()
-        array = gparray.GpArray.initFromCatalog(dburl, utility=True)
-    except:
-        logger.error('Failed to retrieve configuration information from the master.')
-        raise
-    
-    # make sure we have a standby to delete
-    if not array.standbyMaster:
-        logger.error('Request made to remove warm master standby, '  
-                     'but no standby located.')
-        raise GpInitStandbyException('no standby configured')
-    
-    print_summary(options, array, None)
-
-    # Disable Ctrl-C
-    signal.signal(signal.SIGINT,signal.SIG_IGN)
-    
-    stop_database(options)
-    
-    try:
-        remove_standby_from_catalog(options, array)
-    except Exception, ex:
-        logger.error('Failed to remove standby master from catalog.')
-        raise GpInitStandbyException(ex)
-    
-    #repopulate flat file
-    pg_system_fs_entries = GetFilespaceEntriesDict(GetFilespaceEntries(array, PG_SYSTEM_FILESPACE).run()).run() 
-    flat_file_location = os.path.join(pg_system_fs_entries[1][2], GP_TRANSACTION_FILES_FILESPACE) 
-    remove_standby_from_flat_file(flat_file_location, GP_TRANSACTION_FILES_FILESPACE, array.standbyMaster)
-    flat_file_location = os.path.join(pg_system_fs_entries[1][2], GP_TEMPORARY_FILES_FILESPACE) 
-    remove_standby_from_flat_file(flat_file_location, GP_TEMPORARY_FILES_FILESPACE, array.standbyMaster)
-
-    if not options.no_restart:
-        start_database()
-	
-    # check if syncmaster running on standby
-    try:
-        gpsyncmaster_pid = gp.getSyncmasterPID(array.standbyMaster.getSegmentHostName(),
-                                               array.standbyMaster.getSegmentDataDirectory())
-        if gpsyncmaster_pid > 0:
-            # stop it
-            logger.info('Stopping gpsyncmaster on %s' %
-                        array.standbyMaster.getSegmentHostName())
-            gp.SegmentStop.remote('stop gpsyncmaster',
-                                  array.standbyMaster.getSegmentHostName(),
-                                  array.standbyMaster.getSegmentDataDirectory())
-    except Exception, ex:
-        logger.error('Failed to stop gpsyncmaster process on standby master.')
-        raise GpInitStandbyException(ex)
- 
-	# delete temporary directories
-    remove_temporary_directories(array.standbyMaster.getSegmentHostName(),
-                                 array.standbyMaster.getSegmentDataDirectory())
-
-    # delete directory
-    remove_standby_filespace_dirs(array)
-
-    # Reenable Ctrl-C
-    signal.signal(signal.SIGINT,signal.default_int_handler)
-  
-#-------------------------------------------------------------------------
-def remove_standby_filespace_dirs(array):
-    """Removes the filespace directories on the standby master."""
-    
-    if array.standbyMaster:
-        logger.info('Removing filespace directories on standby master...')
-       
-        fs_dirs = array.standbyMaster.getSegmentFilespaces().values()
-        
-        pool = base.WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
-        
-        for fs_dir in fs_dirs:
-            cmd = unix.RemoveFiles('delete standby filespace dir',
-                                   fs_dir, ctxt=base.REMOTE,
-                                   remoteHost=array.standbyMaster.getSegmentHostName())
-            pool.addCommand(cmd)
-        
-        pool.join()
-        try:
-            pool.check_results()
-        except Exception, ex:
-            logger.error('Failed to remove filespace directories on standby master.')
-            raise GpInitStandbyException(ex)
-        finally:
-            pool.haltWork()
-            
-
-
-#-------------------------------------------------------------------------
-def create_standby(options):
-    """Creates the standby master."""
-    
-    global g_init_standby_state
-    
-    master_filespace_map = None
-    standby_filespace_map = None
-    array = None
-    conn = None
-    
-    # The mode the master was in when we started
-    # This is needed because when gpinitstandby is
-    # called by gpinitsystem the db is in master only
-    # mode and needs to remain that way. 
-    master_mode='production'
-    
-    try:
-        try:
-            dburl = getDbUrlForInitStandby()
-            array = gparray.GpArray.initFromCatalog(dburl, utility=True)
-            
-            # get list of master filespaces for later
-            conn = dbconn.connect(dburl, utility=True)
-            master_filespace_map = catalog.get_master_filespace_map(conn)
-            
-            # get standby filespace map
-            if not options.no_update:
-                # create new standby
-                standby_filespace_map = get_filespace_mappings(array, master_filespace_map)
-            else:
-                standby_filespace_map = catalog.get_standby_filespace_map(conn)
-        except Exception, ex:
-            logger.error('Failed to retrieve configuration information from the master.')
-            raise GpInitStandbyException(ex)
-        finally:
-            if conn:
-                conn.close()
-
-        # Get the mode the master is in so we can restore it to that mode
-        cmd = unix.FileContainsTerm('check mode', 'gp_role=utility',
-                                    array.master.getSegmentDataDirectory() + '/postmaster.opts')
-        cmd.run(validateAfter=False)
-        
-        if cmd.contains_term():
-            master_mode = 'utility'
-        else:
-            master_mode = 'production'
-        
-        # validate
-        validate_standby_init(options, array, standby_filespace_map)
-        
-        # display summary
-        print_summary(options, array, standby_filespace_map)
-        
-        # GPSQL does not support gppkg and packages, and there is a known issue with the GPSQL rpm build:
-        # MPP-15568 and GPSQL-99.
-        #
-        # sync packages
-        # The design decision here is to squash any exceptions resulting from the 
-        # synchronization of packages. We should *not* disturb the user's attempts
-        # to initialize a standby.
-        # try:
-        #    logger.info('Syncing Greenplum Database extensions to standby')
-        #    SyncPackages(options.standby_host).run()
-        # except Exception, e:
-        #    logger.exception('Syncing of Greenplum Database extensions has failed.')
-        #    logger.warning('Please run gppkg --clean after successful standby initialization.')
-
-        # Disable Ctrl-C
-        signal.signal(signal.SIGINT,signal.SIG_IGN)
-
-        if master_mode == 'utility':
-            stop_master()
-        else:
-            stop_database(options)
-    
-        # update the catalog if needed
-        if not options.no_update:
-            update_pg_hba_conf(options, array)
-            array = add_standby_to_catalog(options,
-                                           standby_filespace_map)
-        else:
-            logger.info('-n option given, skipping catalog update')
- 
-        copy_master_filespaces_to_standby(options, array,
-                                          master_filespace_map,
-                                          standby_filespace_map)
-        update_gpdbid_file(options, array)
-        update_flat_file(array, standby_filespace_map, GP_TRANSACTION_FILES_FILESPACE)
-        update_flat_file(array, standby_filespace_map, GP_TEMPORARY_FILES_FILESPACE)
-        # no need to recreate the temporaries directory
-        if not options.no_update:
-            create_temporary_directories(array.standbyMaster.getSegmentHostName(),
-                                         array.standbyMaster.getSegmentDataDirectory())
-        cleanup_pg_hba_conf_backup(array)
-
-        if master_mode == 'utility':
-            start_master()
-        else:
-            start_database()
-    
-        # Reenable Ctrl-C
-        signal.signal(signal.SIGINT,signal.default_int_handler)
-    except Exception, ex:
-        # Something went wrong.  Based on the current state, we can rollback
-        # the operation.
-        logger.info('Trying to rollback changes that have been made...')
-        if g_init_standby_state == INIT_STANDBY_STATE_NOT_STARTED:
-            # nothing to rollback
-            pass
-        elif g_init_standby_state == INIT_STANDBY_STATE_UPDATE_CATALOG:
-            undo_catalog_update(options, array)
-        elif g_init_standby_state == INIT_STANDBY_STATE_COPY_FILES or \
-             g_init_standby_state == INIT_STANDBY_STATE_UPDATE_GPDBID:
-            undo_update_pg_hba_conf(array)
-            undo_catalog_update(options, array)
-            undo_file_copy(options, array)
-        # at this point we are back at the original state so we'll start up
-        # the database.
-        if g_init_standby_state != INIT_STANDBY_STATE_NOT_STARTED:
-            start_database()
-        raise GpInitStandbyException(ex)
-            
-            
-#-------------------------------------------------------------------------
-def update_pg_hba_conf(options, array):
-    """Updates the pg_hba.conf file to include the ip addresses of the
-    standby master."""
-    
-    logger.info('Updating pg_hba.conf file...')
-    try:
-        master_data_dir = array.master.getSegmentDataDirectory()
-        standby_ips = unix.InterfaceAddrs.remote('get standby ips', options.standby_host)
-        current_user = unix.UserId.local('get userid')
-        
-        # back it up
-        os.system('cp %s/pg_hba.conf %s/%s' \
-                  % (master_data_dir, master_data_dir, PG_HBA_BACKUP))
-        
-        # read in current pg_hba.conf file
-        fp = open(master_data_dir + '/pg_hba.conf', 'r')
-        pg_hba_conf = fp.readlines()
-        fp.close()
-        
-        # Find where the comments stop
-        index = 0
-        while pg_hba_conf[index].strip().startswith('#'):
-            index += 1
-        
-        new_section = ['# standby master host ip addresses\n']
-        for ip in standby_ips:
-            cidr_suffix = '/128' if ':' in ip else '/32' # MPP-15889
-            new_section.append('host\tall\t%s\t%s%s\ttrust\n' % (current_user, ip, cidr_suffix))
-            
-        # insert new section
-        pg_hba_conf[index:index] = new_section
-        
-        # write it out
-        fp = open(array.master.getSegmentDataDirectory() + '/pg_hba.conf', 'w')
-        fp.writelines(pg_hba_conf)
-        fp.close()
-        
-    except Exception, ex:
-        logger.error('Failed to update pg_hba.conf file on master.')
-        raise GpInitStandbyException(ex)
-
-
-#-------------------------------------------------------------------------
-def cleanup_pg_hba_conf_backup(array):
-    """Removes the pg_hba.conf backup."""
-    
-    logger.info('Removing pg_hba.conf backup...')
-    master_data_dir = array.master.getSegmentDataDirectory()
-    standby_data_dir = array.standbyMaster.getSegmentDataDirectory()
-    
-    try:
-        unix.RemoveFiles.local('cleanup master pg_hba.conf backup', '%s/%s' % (master_data_dir, PG_HBA_BACKUP))
-        unix.RemoveFiles.remote('cleanup standby pg_hba.conf backup',
-                                array.standbyMaster.getSegmentHostName(),
-                                '%s/%s' % (standby_data_dir, PG_HBA_BACKUP))
-    except:
-        # ignore...
-        pass
-    
-
-#-------------------------------------------------------------------------
-def validate_standby_init(options, array, standby_filespace_map):
-    """Validates the parameters and environment."""
-    
-    logger.info('Validating environment and parameters for standby initialization...')
-    if array.standbyMaster and not options.no_update:
-        logger.error('Standby master already configured')
-        logger.info('If you want to resync the standby master, use the -n option')
-        raise GpInitStandbyException('standby master already configured')
-    
-    if options.no_update:
-        if array.standbyMaster:
-            options.standby_host = array.standbyMaster.getSegmentHostName()
-        else:
-            logger.error('Cannot use -n option when standby master has not yet been configured')
-            raise GpInitStandbyException('Standby master not configured')
-        
-    # make sure we have top level dirs
-    for fs_name, fs_dir in standby_filespace_map:
-        base_dir = os.path.dirname(os.path.normpath(fs_dir))
-        # In GPSQL, user should not maintain the master local path anymore.
-        unix.MakeDirectory.remote('make dir for ' + str(fs_name), options.standby_host, base_dir)
-
-        if not unix.FileDirExists.remote('check for filespace dir',
-                                         options.standby_host,
-                                         base_dir):
-            logger.error('Parent directory %s does not exist on host %s' %(base_dir, options.standby_host))
-            logger.error('This directory must be created before running gpactivatestandby')
-            raise GpInitStandbyException('Parent directory %s does not exist' % base_dir)
-
-        # check that master data dir does not exist on new host unless we are just re-syncing
-        logger.info('Checking for filespace directory %s on %s' % (fs_dir, options.standby_host))
-        if not options.no_update and unix.FileDirExists.remote('check for filespace dir', options.standby_host,
-                                                               fs_dir):
-            logger.error('Filespace directory already exists on host %s' % options.standby_host)
-            if array.standbyMaster:
-                logger.error('If you want to just sync the data directory, use the -n option')
-            raise GpInitStandbyException('master data directory exists')
-
-
-#-------------------------------------------------------------------------
-def get_add_standby_sql(hostname, address, filespaces):
-    """Returns the SQL for adding a standby master."""
-    
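-    # The generated SQL looks like (host name and path illustrative):
-    #   select gp_add_master_standby('smdw', 'smdw', '/data/master/gpseg-1')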
-    sql = "select gp_add_master_standby('%s', '%s', '%s')" % (hostname,
-                                                              address,
-                                                              filespaces[0][1])
-    return sql
-
-
-#-------------------------------------------------------------------------
-def get_remove_standby_sql():
-    """Returns the SQL for removing a standby master."""
-
-    sql = "select gp_remove_master_standby()"
-    return sql
-
-
-#-------------------------------------------------------------------------
-def filespace_map_to_string(filespace_map):
-    """Converts the filespace map into a postgres array string."""
-    
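-    # e.g. [['pg_system', '/data/master/gpseg-1']] (path illustrative) becomes
-    #   '{{"pg_system","/data/master/gpseg-1"}}'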
-    filespace_map_str = "{"
-    for item in filespace_map:
-        filespace_map_str += '{"%s","%s"},' % (item[0], item[1])
-    filespace_map_str =  filespace_map_str.rstrip(',') + "}"
-    return filespace_map_str
-
-    
-#-------------------------------------------------------------------------
-def get_filespace_mappings(array, master_filespace_map):
-    """Asks user for the mapping from master filespaces -> standby master.  
-    master_filespace_map should be a 2d array of:
-    [ ['master_fs1name', 'master_fs1path'],
-    ['master_fs2name', 'master_fs2path'],
-    ...
-    ['master_fsnname', 'master_fsnpath'] ]"""
-    
-    standby_filespace_map = []
-    tmp_validator = lambda str, default, ignore1: str if str and str != '' else default
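-    # With the prompt disabled the result is just the pg_system entry,
-    # e.g. [['pg_system', '/data/master/gpseg-1']] (path illustrative).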
-    
-    if len(master_filespace_map) > 1:
-        print """The filespace locations on the master must be mapped to
-locations on the standby.  These locations must be empty on the
-standby master host.  The default provided is the location of
-the filespace on the master.  In most cases the defaults can be
-used.  The exception is the pg_system filespace which must be in
-the same location on both the master and standby master.
-    """
-    
-    for item in master_filespace_map:
-        if item[0] != 'pg_system':
-            continue
-
-        fs_loc = item[1]
-        # fs_loc = ask_input(None,'Enter standby filespace location for filespace %s (default: %s)' % (item[0], item[1]), '',
-        #                   item[1], tmp_validator, None)
-        if not os.path.isabs(fs_loc):
-            raise GpInitStandbyException('Filespace paths must be absolute paths.  %s is a relative path.' % fs_loc)
-        standby_filespace_map.append([item[0], fs_loc])
-    
-    return standby_filespace_map
-
-#-------------------------------------------------------------------------
-def add_standby_to_catalog(options, standby_filespace_map):
-    """Adds the standby to the catalog."""
-    
-    global g_init_standby_state
-    
-    try:
-        g_init_standby_state=INIT_STANDBY_STATE_UPDATE_CATALOG
-        start_master()
-        dburl = getDbUrlForInitStandby()
-        conn = dbconn.connect(dburl, utility=True)
-    
-        logger.info('Adding standby master to catalog...')
-    
-        sql = get_add_standby_sql(options.standby_host, options.standby_host,
-                                  standby_filespace_map)
-    
-        dbconn.execSQL(conn, sql)
-        conn.commit()
-        conn.close()
-        logger.info('Database catalog updated successfully.')
-        array = gparray.GpArray.initFromCatalog(dburl, utility=True)
-        stop_master()
-
-        # MPP-13245, store the new standby_dbid in the gp_dbid file
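-        # (gp_dbid then carries a 'dbid = ...' line plus a 'standby_dbid = ...'
-        #  line; the exact formatting is handled by GpDbidFile)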
-        d = GpDbidFile( array.master.getSegmentDataDirectory(), do_read=True, logger=get_logger_if_verbose() )
-        d.standby_dbid = int(array.standbyMaster.getSegmentDbId())
-        d.write_gp_dbid()
-
-        return array
-    except Exception, ex:
-        logger.error('Failed to add standby to master catalog.')
-        raise GpInitStandbyException(ex)
-
-
-#-------------------------------------------------------------------------  
-def remove_standby_from_catalog(options, array):
-    """Removes the standby from the catalog."""
-    # update catalog
-    try:
-        start_master()
-        dburl = getDbUrlForInitStandby()
-        conn = dbconn.connect(dburl, utility=True)
-
-        logger.info('Removing standby master from catalog...')
-        sql = get_remove_standby_sql()
-        
-        dbconn.execSQL(conn, sql)
-        conn.commit()
-        conn.close()
-        
-        logger.info('Database catalog updated successfully.')
-        stop_master()
-
-        # MPP-13245, remove the standby_dbid from the gp_dbid file
-        d = GpDbidFile( array.master.getSegmentDataDirectory(), do_read=True, logger=get_logger_if_verbose() )
-        d.standby_dbid = None
-        d.write_gp_dbid()
-
-    except Exception, ex:
-        logger.error('Failed to remove standby from master catalog.')
-        stop_master()
-        raise GpInitStandbyException(ex)
-        
-
-#-------------------------------------------------------------------------   
-def copy_master_filespaces_to_standby(options, array, master_filespace_map, standby_filespace_map):
-    """Copies the filespaces from the master to the standby according to
-    the maps provided."""
-    
-    global g_init_standby_state
-
-    g_init_standby_state=INIT_STANDBY_STATE_COPY_FILES
-    #maps -> dicts
-    master_fs_dict = {}
-    standby_fs_dict = {}
-    for i in master_filespace_map:
-        master_fs_dict[i[0]] = i[1]
-    for i in standby_filespace_map:
-        standby_fs_dict[i[0]] = i[1]
-
-    # The worker pool for the copies
-    pool = base.WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
-
-    # need to make sure file spaces are sync'd
-    for fs_name, fs_dir in master_fs_dict.iteritems():
-        cwd = os.getcwd()
-        os.chdir(fs_dir)
-        logger.info('Forcing changed blocks to disk for filespace %s...' % fs_dir)
-        os.system('sync')
-        os.chdir(cwd)
-    
-        # resolve the mapping
-        standby_fs_dir = standby_fs_dict[fs_name]
-    
-        # create the directory
-        if not unix.FileDirExists.remote('check dir', options.standby_host, standby_fs_dir):
-            logger.info('Filespace directory does not exist on %s' % options.standby_host)
-            logger.info('Creating %s:%s' % (options.standby_host, standby_fs_dir))
-            unix.MakeDirectory.remote('create dir', options.standby_host, standby_fs_dir)
-            unix.Chmod.remote('chmod', options.standby_host, standby_fs_dir, "0700")
-        
-        # Do the copy using pysync
-        logger.info('Copying filespace directory to %s' % options.standby_host)
-            
-        exclude_dirs = ['gpperfmon/data', 'pg_log', 'db_dumps']
-        pysync_options = '-x ' + ' -x '.join(exclude_dirs)
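-        # i.e. '-x gpperfmon/data -x pg_log -x db_dumps'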
-        
-        if options.debug:
-            pysync_options = pysync_options + ' -v'
-            
-        cmd = gp.PySync('master data dir sync', fs_dir,
-                     options.standby_host, standby_fs_dir,
-                     options=pysync_options)
-        pool.addCommand(cmd)
-
-    pool.join()
-    try:
-        pool.check_results()
-    except Exception, ex:
-        logger.error('Failed to copy filespace directories from master to standby.')
-        raise GpInitStandbyException(ex)
-    finally:
-        pool.haltWork()
-        
-
-#-------------------------------------------------------------------------
-def update_gpdbid_file(options, array):
-    """Updates the gp_dbid file on the standby master to reflect the correct dbid."""
-    global g_init_standby_state
-    
-    g_init_standby_state = INIT_STANDBY_STATE_UPDATE_GPDBID
-    
-    standby_dbid = array.standbyMaster.getSegmentDbId()
-    standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())
-    try:
-        # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace
-        cmd = gp.GpCreateDBIdFile('update gp_dbid file',
-                                  standby_datadir,
-                                  standby_dbid,
-                                  verbose=logging_is_verbose(),
-                                  ctxt=base.REMOTE,
-                                  remoteHost=options.standby_host)
-
-        cmd.run(validateAfter=True)
-    except Exception, ex:
-        logger.error('Failed to update standby master\'s gp_dbid file.')
-        raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def write_temp_flat_file(flat_file_location, flat_file, array, standby_filespace_dict):
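-    """Writes <flat_file>.tmp in the current working directory with the
-    standby's filespace entry.  The flat file (as this code expects it) has a
-    header line holding the filespace oid followed by '<dbid> <path>' lines:
-    the existing entry is rewritten with the standby's dbid and mapped path,
-    and the master's original entry is appended as the peer.  Returns the temp
-    file name, or None if the flat file does not exist."""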
-
-    standby_master = array.standbyMaster
-    master = array.master
-
-    temp_file = None
-    if os.path.exists(flat_file_location):
-        logger.info('Writing standby information to %s flat file' % flat_file_location)
-        temp_file = flat_file + '.tmp'
-        lines_to_write = ""
-
-        #Read data
-        with open(flat_file_location, 'r') as read_file:
-            for line in read_file:
-                fs_info = line.split()
-                if len(fs_info) != 2:
-                    fs_oid = fs_info[0]
-                    lines_to_write += line.strip()
-                else:
-                    fs_dir = fs_info[1]
-                    fs_info[0] = str(standby_master.getSegmentDbId())
-                    fs_info[1] = standby_filespace_dict[array.getFileSpaceName(int(fs_oid))] 
-                    lines_to_write += ' '.join(fs_info)
-
-                lines_to_write += '\n'
-            #We now write the peer information
-            lines_to_write +=( str(master.getSegmentDbId()) + ' ' + fs_dir + '\n' )
-
-        #Write data
-        with open(temp_file, 'w') as write_file:
-            write_file.write(lines_to_write)   
-
-        #Check what we've written
-        with open(temp_file) as check_file:
-            contents = check_file.read()
-            if contents != lines_to_write:
-                raise MoveFilespaceError('Failed to write contents to flat file.')
-
-    return temp_file
-
-#-------------------------------------------------------------------------
-def remove_standby_from_flat_file(flat_file_location, flat_file, standby_master):
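-    """Rewrites the flat file without the standby's '<dbid> <path>' entry,
-    writing the result to <flat_file>.tmp first and then moving it over
-    the original."""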
-
-    if os.path.exists(flat_file_location):
-        logger.info('Removing standby entry from %s flat file' % flat_file)    
-        temp_file = flat_file + '.tmp'
-        lines_to_write = ""
-        with open(temp_file, 'w') as write_file:
-            with open(flat_file_location, 'r') as read_file:
-                for line in read_file:
-                    fs_info = line.split()
-                    if fs_info[0] == str(standby_master.getSegmentDbId()): 
-                        continue
-                    else:
-                        lines_to_write += line
-        
-            write_file.write(lines_to_write)                
-            logger.debug('Wrote %s to %s' % (lines_to_write, temp_file))
-
-        #Check what we've written
-        with open(temp_file) as check_file:
-            contents = check_file.read()
-            if contents != lines_to_write:
-                raise MoveFilespaceError('Failed to write contents to flat file.')
-        
-        shutil.move(temp_file, flat_file_location)
-
-#-------------------------------------------------------------------------
-def update_flat_file(array, standby_filespace_map, flat_file):
-    """
-        If the transaction/temporary filespaces have
-        ever been moved, we need to update the flat file.
-        The filespace directories are copied by the 
-        copy_master_filespaces method.
-    """
-
-    logger.info('Updating filespace flat files')    
-
-    standby_filespace_dict = {}
-    for i in standby_filespace_map:
-        standby_filespace_dict[i[0]] = i[1]
-
-    pg_system_fs_entries = GetFilespaceEntriesDict(GetFilespaceEntries(array, PG_SYSTEM_FILESPACE).run()).run() 
-   
-    flat_file_location = os.path.join(pg_system_fs_entries[1][2], flat_file) 
-    logger.debug('flat file location for transaction files = %s' % flat_file_location)
-    #Copy over the updated flat file to the standby
-    temp_flat_file = write_temp_flat_file(flat_file_location, flat_file, 
-                                          array, standby_filespace_dict)
-    
-    if temp_flat_file:
-        cpCmd = unix.Scp('gpinitstandby updating flat file for transaction filespace', 
-                         os.path.join(os.getcwd(), temp_flat_file),
-                         flat_file_location,
-                         dstHost=array.standbyMaster.getSegmentHostName()    
-                        )
-        cpCmd.run(validateAfter=True)
-        logger.debug('results of scp = %s' % cpCmd.get_results())
-       
-        #Rewrite the master flat file to include the standby information 
-        shutil.move(temp_flat_file, flat_file_location)
-    
-#-------------------------------------------------------------------------
-# Rollback functions
-#-------------------------------------------------------------------------
-
-def undo_catalog_update(options, array):
-    """Undoes the catalog updates."""
-    
-    # See if we can connect to master
-    conn = None
-    try:
-        dburl = getDbUrlForInitStandby()
-        conn = dbconn.connect(dburl, utility=True)
-        stop_master()
-    except:
-        pass
-    finally:
-        if conn:
-            conn.close()
-    
-    try:
-        remove_standby_from_catalog(options, array)
-    except:
-        # Can't undo because the update never occurred.  OK to
-        # ignore this exception and continue.
-        pass
-        
-
-#-------------------------------------------------------------------------
-def undo_file_copy(options, array):
-    """Undoes the filespace copy."""
-    
-    try:
-        remove_standby_filespace_dirs(array)
-    except Exception, ex:
-        # Just log a warning here.
-        logger.warn('There was an error trying to clean up the filespace')
-        logger.warn('directories on the standby host %s: %s' % (options.standby_host, str(ex)))
-
-
-#-------------------------------------------------------------------------
-def undo_update_pg_hba_conf(array):
-    """Undoes the pg_hba.conf update."""
-    
-    logger.info('Restoring pg_hba.conf from backup...')
-    master_data_dir = array.master.getSegmentDataDirectory()
-    os.system('mv %s/%s %s/pg_hba.conf' % (master_data_dir, PG_HBA_BACKUP, master_data_dir))
-
-#-------------------------------------------------------------------------
-# Main
-#-------------------------------------------------------------------------
-try:
-    # setup logging
-    logger = get_default_logger()
-    
-    (options, args) = parseargs()
-    
-    setup_tool_logging(EXECNAME,unix.getLocalHostname(),unix.getUserName(),options.logfile)
-
-    # Turn on debug logging if needed
-    if options.debug:
-        enable_verbose_logging()
-    if options.quiet:
-        quiet_stdout_logging()
-
-    # Kick off the work
-    if options.remove:
-        delete_standby(options)
-        logger.info('Successfully removed standby master')
-    else:
-        create_standby(options)
-        if options.no_update:
-            logger.info('Successfully synchronized standby master.')
-        else:
-            logger.info('Successfully created standby master on %s' % options.standby_host)
-
-except KeyboardInterrupt:
-    logger.error('User canceled')
-    sys.exit(2)
-except Exception, ex:
-    # 'options' is unset if parseargs() itself raised, so guard the lookups
-    if 'options' in globals() and options.remove:
-        logger.error('Error removing standby master: %s' % str(ex))
-    else:
-        logger.error('Error initializing standby master: %s' % str(ex))
-    if 'options' in globals() and options.debug:
-        logger.exception(ex)
-    sys.exit(2)
-