Posted to commits@hawq.apache.org by hu...@apache.org on 2015/12/14 03:19:37 UTC

[2/2] incubator-hawq git commit: HAWQ-247. Remove deprecated GUCs related to FTS

HAWQ-247. Remove deprecated GUCs related to FTS


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/c90ba7cf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/c90ba7cf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/c90ba7cf

Branch: refs/heads/master
Commit: c90ba7cf744b7501114f946075fff81ab8cbbe32
Parents: 1500475
Author: Ruilong Huo <rh...@pivotal.io>
Authored: Fri Dec 11 02:01:55 2015 -0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Mon Dec 14 10:19:25 2015 +0800

----------------------------------------------------------------------
 src/backend/catalog/gp_toolkit.sql.in           |   2 +-
 src/backend/cdb/cdbvars.c                       |  57 --
 src/backend/utils/misc/guc.c                    |  51 --
 src/include/cdb/cdbvars.h                       |  21 -
 .../data/upgrade41/catalog40/toolkit.sql        |   2 +-
 src/test/unit/mock/mock_info.json               |  36 -
 tools/bin/gppylib/commands/gp.py                | 402 +--------
 tools/bin/gppylib/gp_contentnum.py              | 160 ----
 tools/bin/gppylib/gp_dbid.py                    | 174 ----
 tools/bin/gppylib/gp_era.py                     |   2 +-
 tools/bin/gppylib/gparray.py                    |   3 -
 .../gppylib/operations/buildMirrorSegments.py   | 860 -------------------
 tools/bin/gppylib/system/faultProberImplGpdb.py |  93 --
 tools/bin/lib/gp_bash_functions.sh              |  36 -
 tools/bin/lib/gpconfigurenewsegment             | 404 ---------
 15 files changed, 4 insertions(+), 2299 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/src/backend/catalog/gp_toolkit.sql.in
----------------------------------------------------------------------
diff --git a/src/backend/catalog/gp_toolkit.sql.in b/src/backend/catalog/gp_toolkit.sql.in
index 43e0ffb..abc3094 100644
--- a/src/backend/catalog/gp_toolkit.sql.in
+++ b/src/backend/catalog/gp_toolkit.sql.in
@@ -573,7 +573,7 @@ REVOKE ALL ON TABLE %%JETPACK_PREFIX%%log_command_timings FROM public;
 --	FROM 
 --		%%JETPACK_PREFIX%%param_settings()
 --	WHERE
---		paramname NOT IN ('config_file', 'data_directory', 'gp_contentid', 'gp_dbid', 'hba_file', 'ident_file', 'port')
+--		paramname NOT IN ('config_file', 'data_directory', 'hba_file', 'ident_file', 'port')
 --	GROUP BY 
 --		1,2
 --	HAVING 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/src/backend/cdb/cdbvars.c
----------------------------------------------------------------------
diff --git a/src/backend/cdb/cdbvars.c b/src/backend/cdb/cdbvars.c
index 8a614db..fc0ff47 100644
--- a/src/backend/cdb/cdbvars.c
+++ b/src/backend/cdb/cdbvars.c
@@ -114,28 +114,6 @@ bool          gp_select_invisible=false; /* debug mode to allow select to see "i
 int     gp_snapshotadd_timeout=10;
 
 /*
- * Probe retry count for fts prober.
- */
-int			gp_fts_probe_retries = 5;
-
-/*
- * Probe timeout for fts prober.
- */
-int			gp_fts_probe_timeout = 20;
-
-/*
- * Polling interval for the fts prober. A scan of the entire system starts
- * every time this expires.
- */
-int			gp_fts_probe_interval=60;
-
-/*
- * Number of threads to use for probe of segments (it is a good idea to have this
- * larger than the number of segments per host).
- */
-int			gp_fts_probe_threadcount=16;
-
-/*
  * gp_enable_delete_as_truncate
  *
  * piggy-back a truncate on simple delete statements (statements
@@ -787,20 +765,6 @@ show_gp_connections_per_thread(void)
 GpVars_Verbosity   gp_log_gang;
 
 /*
- * gp_log_fts (string)
- *
- * What kind of messages should the fault-prober log ?
- * "OFF"     -> only errors are logged
- * "TERSE"   -> terse logging of routine events
- * "VERBOSE" -> gang allocation per command is logged
- * "DEBUG"   -> additional events are logged at severity level DEBUG1 to DEBUG5
- *
- * The messages that are enabled by the TERSE and VERBOSE settings are
- * written with a severity level of LOG.
- */
-GpVars_Verbosity   gp_log_fts;
-
-/*
  * gp_log_interconnect (string)
  *
  * Should connections between internal processes be logged?  (qDisp/qExec/etc)
@@ -881,27 +845,6 @@ gpvars_show_gp_log_gang(void)
 	return gpvars_verbosity_to_string(gp_log_gang);
 }                               /* gpvars_show_gp_log_gangs */
 
-/*
- * gpvars_assign_gp_log_fts
- * gpvars_show_gp_log_fts
- */
-const char *
-gpvars_assign_gp_log_fts(const char *newval, bool doit, GucSource source __attribute__((unused)) )
-{
-	GpVars_Verbosity v = gpvars_string_to_verbosity(newval);
-
-	if (v == GPVARS_VERBOSITY_UNDEFINED)
-        return NULL;
-	if (doit)
-		gp_log_fts = v;
-	return newval;
-}                               /* gpvars_assign_gp_log_fts */
-
-const char *
-gpvars_show_gp_log_fts(void)
-{
-	return gpvars_verbosity_to_string(gp_log_fts);
-}                               /* gpvars_show_gp_log_fts */
 
 /*
  * gpvars_assign_gp_log_interconnect

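Note: the deleted gpvars_assign_gp_log_fts hook simply mapped a user-supplied
string to a GpVars_Verbosity level and rejected anything else. A minimal Python
sketch of that mapping, not part of this commit (the dict itself is an
assumption; the level names come from the deleted comment block):

    # Sketch of the string-to-verbosity mapping the removed hook performed.
    VERBOSITY = {"off": 0, "terse": 1, "verbose": 2, "debug": 3}

    def string_to_verbosity(newval):
        # Returns None for an unknown value, as the C hook returned NULL
        # to reject the proposed setting.
        return VERBOSITY.get(newval.strip().lower())

    assert string_to_verbosity("TERSE") == 1
    assert string_to_verbosity("bogus") is None
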
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/src/backend/utils/misc/guc.c
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 568759e..869d747 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -537,7 +537,6 @@ static int	block_size;
 static bool integer_datetimes;
 //static bool standard_conforming_strings;
 static char *gp_log_gang_str;
-static char *gp_log_fts_str;
 static char *gp_log_interconnect_str;
 static char *gp_interconnect_type_str;
 static char *gp_interconnect_fc_method_str;
@@ -5752,46 +5751,6 @@ static struct config_int ConfigureNamesInt[] =
 	},
 
 	{
-		{"gp_fts_probe_retries", PGC_POSTMASTER, GP_ARRAY_TUNING,
-			gettext_noop("Number of retries for FTS to complete probing a segment."),
-			gettext_noop("Used by the fts-probe process."),
-			GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE
-		},
-		&gp_fts_probe_retries,
-		5, 0, 100, NULL, NULL
-	},
-
-	{
-		{"gp_fts_probe_timeout", PGC_USERSET, GP_ARRAY_TUNING,
-			gettext_noop("Maximum time (in seconds) allowed for FTS to complete probing a segment."),
-			gettext_noop("Used by the fts-probe process."),
-			GUC_UNIT_S
-		},
-		&gp_fts_probe_timeout,
-		20, 0, INT_MAX, NULL, NULL
-	},
-
-	{
-		{"gp_fts_probe_interval", PGC_POSTMASTER, GP_ARRAY_TUNING,
-			gettext_noop("A complete probe of all segments starts each time a timer with this period expires."),
-			gettext_noop("Used by the fts-probe process. "),
-			GUC_UNIT_S
-		},
-		&gp_fts_probe_interval,
-		60, 10, INT_MAX, NULL, NULL
-	},
-
-	{
-		{"gp_fts_probe_threadcount", PGC_POSTMASTER, GP_ARRAY_TUNING,
-			gettext_noop("Use this number of threads for probing the segments."),
-			gettext_noop("The number of threads to create at each probe interval expiration."),
-			GUC_NOT_IN_SAMPLE
-		},
-		&gp_fts_probe_threadcount,
-		16, 1, 128, NULL, NULL
-	},
-
-	{
 		{"gp_session_id", PGC_BACKEND, CLIENT_CONN_OTHER,
 			gettext_noop("Global ID used to uniquely identify a particular session in a Greenplum Database array"),
 			NULL,
@@ -7732,16 +7691,6 @@ static struct config_string ConfigureNamesString[] =
 	},
 
 	{
-		{"gp_log_fts", PGC_POSTMASTER, LOGGING_WHAT,
-			gettext_noop("Sets the verbosity of logged messages pertaining to fault probing."),
-			gettext_noop("Valid values are \"off\", \"terse\", \"verbose\" and \"debug\"."),
-			GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE
-		},
-		&gp_log_fts_str,
-		"terse", gpvars_assign_gp_log_fts, gpvars_show_gp_log_fts
-	},
-
-	{
 		{"gp_log_interconnect", PGC_USERSET, LOGGING_WHAT,
 			gettext_noop("Sets the verbosity of logged messages pertaining to connections between worker processes."),
 			gettext_noop("Valid values are \"off\", \"terse\", \"verbose\" and \"debug\"."),

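Note: with these table entries gone, the server no longer recognizes the
parameters at all. A hedged post-upgrade check, not part of this commit
(assumes psql is on PATH and can reach the master; SHOW on a removed GUC
fails with "unrecognized configuration parameter"):

    import subprocess

    REMOVED = ["gp_fts_probe_retries", "gp_fts_probe_timeout",
               "gp_fts_probe_interval", "gp_fts_probe_threadcount",
               "gp_log_fts"]

    for guc in REMOVED:
        p = subprocess.Popen(["psql", "-c", "SHOW %s;" % guc],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        # Non-zero exit means the parameter is unrecognized, i.e. removed.
        print("%-26s %s" % (guc, "removed" if p.returncode else "still present"))
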
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/src/include/cdb/cdbvars.h
----------------------------------------------------------------------
diff --git a/src/include/cdb/cdbvars.h b/src/include/cdb/cdbvars.h
index 00206dd..158509e 100644
--- a/src/include/cdb/cdbvars.h
+++ b/src/include/cdb/cdbvars.h
@@ -396,10 +396,6 @@ extern const char *role_to_string(GpRoleValue role);
 extern int	gp_segment_connect_timeout; /* GUC var - timeout specifier for gang creation */
 extern int  gp_snapshotadd_timeout; /* GUC var - timeout specifier for snapshot-creation wait */
 
-extern int	gp_fts_probe_retries; /* GUC var - specifies probe number of retries for FTS */
-extern int	gp_fts_probe_timeout; /* GUC var - specifies probe timeout for FTS */
-extern int	gp_fts_probe_interval; /* GUC var - specifies polling interval for FTS */
-extern int	gp_fts_probe_threadcount; /* GUC var - specifies number of threads to use for FTS probes */
 extern bool	gp_fts_transition_parallel; /* GUC var - controls parallel segment transition for FTS */
 
 /*
@@ -601,23 +597,6 @@ const char *gpvars_assign_gp_log_gang(const char *newval, bool doit, GucSource s
 const char *gpvars_show_gp_log_gang(void);
 
 /*
- * gp_log_fts (string)
- *
- * What kind of messages should be logged by the fault-prober
- * "OFF"     -> only errors are logged
- * "TERSE"   -> terse logging of routine events
- * "VERBOSE" -> more messages
- * "DEBUG"   -> additional events are logged at severity level DEBUG1 to DEBUG5
- *
- * The messages that are enabled by the TERSE and VERBOSE settings are
- * written with a severity level of LOG.
- */
-extern GpVars_Verbosity    gp_log_fts;
-
-const char *gpvars_assign_gp_log_fts(const char *newval, bool doit, GucSource source);
-const char *gpvars_show_gp_log_fts(void);
-
-/*
  * gp_log_interconnect (string)
  *
  * Should connections between internal processes be logged?  (qDisp/qExec/etc)

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/src/test/regress/data/upgrade41/catalog40/toolkit.sql
----------------------------------------------------------------------
diff --git a/src/test/regress/data/upgrade41/catalog40/toolkit.sql b/src/test/regress/data/upgrade41/catalog40/toolkit.sql
index 9685193..9dfd2f9 100644
--- a/src/test/regress/data/upgrade41/catalog40/toolkit.sql
+++ b/src/test/regress/data/upgrade41/catalog40/toolkit.sql
@@ -572,7 +572,7 @@ AS
     FROM 
         gp_toolkit.gp_param_settings()
     WHERE
-        paramname NOT IN ('config_file', 'data_directory', 'gp_contentid', 'gp_dbid', 'hba_file', 'ident_file', 'port')
+        paramname NOT IN ('config_file', 'data_directory', 'hba_file', 'ident_file', 'port')
     GROUP BY 
         1,2
     HAVING 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/src/test/unit/mock/mock_info.json
----------------------------------------------------------------------
diff --git a/src/test/unit/mock/mock_info.json b/src/test/unit/mock/mock_info.json
index a45129c..2e02af1 100644
--- a/src/test/unit/mock/mock_info.json
+++ b/src/test/unit/mock/mock_info.json
@@ -11014,16 +11014,6 @@
             ], 
             "return": "char*"
         }, 
-        "gpvars_assign_gp_log_fts": {
-            "filename": "src/backend/cdb/cdbvars.c", 
-            "header filename": "src/include/cdb/cdbvars.h", 
-            "parameter": [
-                "newval", 
-                "doit", 
-                "source"
-            ], 
-            "return": "char*"
-        }, 
         "gpvars_assign_gp_log_gang": {
             "filename": "src/backend/cdb/cdbvars.c", 
             "header filename": "src/include/cdb/cdbvars.h", 
@@ -11082,12 +11072,6 @@
             "parameter": [], 
             "return": "char*"
         }, 
-        "gpvars_show_gp_log_fts": {
-            "filename": "src/backend/cdb/cdbvars.c", 
-            "header filename": "src/include/cdb/cdbvars.h", 
-            "parameter": [], 
-            "return": "char*"
-        }, 
         "gpvars_show_gp_log_gang": {
             "filename": "src/backend/cdb/cdbvars.c", 
             "header filename": "src/include/cdb/cdbvars.h", 
@@ -13940,26 +13924,10 @@
             "filename": "src/backend/cdb/cdbvars.c", 
             "header filename": "src/include/cdb/cdbvars.h"
         }, 
-        "gp_fts_probe_interval": {
-            "filename": "src/backend/cdb/cdbvars.c", 
-            "header filename": "src/include/cdb/cdbvars.h"
-        }, 
         "gp_fts_probe_pause": {
             "filename": "src/backend/cdb/cdbvars.c", 
             "header filename": "src/include/cdb/cdbvars.h"
         }, 
-        "gp_fts_probe_retries": {
-            "filename": "src/backend/cdb/cdbvars.c", 
-            "header filename": "src/include/cdb/cdbvars.h"
-        }, 
-        "gp_fts_probe_threadcount": {
-            "filename": "src/backend/cdb/cdbvars.c", 
-            "header filename": "src/include/cdb/cdbvars.h"
-        }, 
-        "gp_fts_probe_timeout": {
-            "filename": "src/backend/cdb/cdbvars.c", 
-            "header filename": "src/include/cdb/cdbvars.h"
-        }, 
         "gp_fts_transition_parallel": {
             "filename": "src/backend/cdb/cdbvars.c", 
             "header filename": "src/include/cdb/cdbvars.h"
@@ -14096,10 +14064,6 @@
             "filename": "src/backend/utils/misc/guc.c", 
             "header filename": "src/include/utils/guc.h"
         }, 
-        "gp_log_fts": {
-            "filename": "src/backend/cdb/cdbvars.c", 
-            "header filename": "src/include/cdb/cdbvars.h"
-        }, 
         "gp_log_gang": {
             "filename": "src/backend/cdb/cdbvars.c", 
             "header filename": "src/include/cdb/cdbvars.h"

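Note: mock_info.json is a registry that maps each mocked symbol to its
defining source and header files, so symbols deleted from the tree must be
pruned from it, as done above. A sketch of doing that mechanically (the
two-level section -> symbol layout is inferred from the JSON shown):

    import json

    def prune_mock_info(path, removed):
        with open(path) as f:
            info = json.load(f)
        for section in info.values():
            if isinstance(section, dict):
                for sym in removed:
                    section.pop(sym, None)   # drop the entry if present
        with open(path, "w") as f:
            json.dump(info, f, indent=4, sort_keys=True)

    prune_mock_info("src/test/unit/mock/mock_info.json",
                    ["gpvars_assign_gp_log_fts", "gpvars_show_gp_log_fts",
                     "gp_log_fts", "gp_fts_probe_interval",
                     "gp_fts_probe_retries", "gp_fts_probe_threadcount",
                     "gp_fts_probe_timeout"])
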
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/tools/bin/gppylib/commands/gp.py
----------------------------------------------------------------------
diff --git a/tools/bin/gppylib/commands/gp.py b/tools/bin/gppylib/commands/gp.py
index 9eae460..e2a5a9a 100644
--- a/tools/bin/gppylib/commands/gp.py
+++ b/tools/bin/gppylib/commands/gp.py
@@ -60,17 +60,6 @@ def getSyncmasterPID(hostname, datadir):
     except:
         return -1
 
-def get_max_dbid(name,conn):
-    try:
-        curs=conn.cursor()
-        curs.execute("SELECT max(dbid) FROM gp_configuration")
-        rows = curs.fetchall()
-        if len(rows) != 1:
-            raise Exception, 'Failed to retrieve maximum dbid from catalog'
-        return rows[0][0]
-    finally:
-        curs.close()
-
 #-----------------------------------------------
 class PySync(Command):
     def __init__(self,name,srcDir,dstHost,dstDir,ctxt=LOCAL,remoteHost=None, options=None):
@@ -158,101 +147,6 @@ class CmdArgs(list):
 
 
 
-class PgCtlBackendOptions(CmdArgs):
-    """
-    List of options suitable for use with the -o option of pg_ctl.
-    Used by MasterStart, SegmentStart to format the backend options
-    string passed via pg_ctl -o
-
-    Examples
-    --------
-
-    >>> str(PgCtlBackendOptions(5432, 1, 2))
-    '-p 5432 -b 1 -z 2 --silent-mode=true'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_master(2, False, False))
-    '-p 5432 -b 1 -z 2 --silent-mode=true -i -M master -C -1 -x 2'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_master(2, False, True))
-    '-p 5432 -b 1 -z 2 --silent-mode=true -i -M master -C -1 -x 2 -E'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_segment('mirror', 1))
-    '-p 5432 -b 1 -z 2 --silent-mode=true -i -M mirror -C 1'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_special('upgrade'))
-    '-p 5432 -b 1 -z 2 --silent-mode=true -U'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_special('maintenance'))
-    '-p 5432 -b 1 -z 2 --silent-mode=true -m'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_utility(True))
-    '-p 5432 -b 1 -z 2 --silent-mode=true -c gp_role=utility'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_utility(False))
-    '-p 5432 -b 1 -z 2 --silent-mode=true'
-    >>> str(PgCtlBackendOptions(5432, 1, 2).set_restricted(True,1))
-    '-p 5432 -b 1 -z 2 --silent-mode=true -c superuser_reserved_connections=1'
-    >>>
-
-    """
-
-    def __init__(self, port, dbid, numcids):
-        """
-        @param port: backend port
-        @param dbid: backend dbid
-        @param numcids: total number of content ids in cluster
-        """
-        CmdArgs.__init__(self, [
-            "-p", str(port),
-            "-b", str(dbid),
-            "-z", str(numcids),
-            "--silent-mode=true"
-        ])
-
-    #
-    # master/segment-specific options
-    #
-
-    def set_master(self, standby_dbid, disable, seqserver):
-        """
-        @param standby_dbid: standby dbid
-        @param disable: start without master mirroring?
-        @param seqserver: start with seqserver?
-        """
-        self.extend(["-i", "-M", "master", "-C", "-1", "-x", str(standby_dbid)])
-        if disable: self.append("-y")
-        if seqserver: self.append("-E")
-        return self
-
-    def set_segment(self, mode, content):
-        """
-        @param mode: mirroring mode
-        @param content: content id
-        """
-        self.extend(["-i", "-M", str(mode), "-C", str(content)])
-        return self
-
-    #
-    # startup mode options
-    #
-
-    def set_special(self, special):
-        """
-        @param special: special mode (none, 'upgrade' or 'maintenance')
-        """
-        opt = {None:None, 'upgrade':'-U', 'maintenance':'-m'}[special]
-        if opt: self.append(opt)
-        return self
-
-    def set_utility(self, utility):
-        """
-        @param utility: true if starting in utility mode
-        """
-        if utility: self.append("-c gp_role=utility")
-        return self
-
-    def set_restricted(self, restricted, max_connections):
-        """
-        @param restricted: true if restricting connections
-        @param max_connections: connection limit
-        """
-        if restricted:  self.append("-c superuser_reserved_connections=%s" % max_connections)
-        return self
-
-
 class PgCtlStartArgs(CmdArgs):
     """
     Used by MasterStart, SegmentStart to format the pg_ctl command
@@ -320,40 +214,6 @@ class PgCtlStopArgs(CmdArgs):
         self.set_wait_timeout(wait, timeout)
         self.append("stop")
 
-
-class MasterStart(Command):
-    def __init__(self, name, dataDir, port, dbid, standby_dbid, numContentsInCluster, era,
-                 wrapper, wrapper_args, specialMode=None, restrictedMode=False, timeout=SEGMENT_TIMEOUT_DEFAULT,
-                 max_connections=1, disableMasterMirror=False, utilityMode=False, ctxt=LOCAL, remoteHost=None
-                 ):
-        self.dataDir=dataDir
-        self.port=port
-        self.utilityMode=utilityMode
-        self.wrapper=wrapper
-        self.wrapper_args=wrapper_args
-
-        # build backend options
-        b = PgCtlBackendOptions(port, dbid, numContentsInCluster)
-        b.set_master(standby_dbid, disableMasterMirror, seqserver=not utilityMode)
-        b.set_utility(utilityMode)
-        b.set_special(specialMode)
-        b.set_restricted(restrictedMode, max_connections)
-
-        # build pg_ctl command
-        c = PgCtlStartArgs(dataDir, b, era, wrapper, wrapper_args, True, timeout)
-        self.cmdStr = str(c)
-
-        Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
-
-    @staticmethod
-    def local(name, dataDir, port, dbid, standbydbid, numContentsInCluster, era,
-              wrapper, wrapper_args, specialMode=None, restrictedMode=False, timeout=SEGMENT_TIMEOUT_DEFAULT,
-              max_connections=1, disableMasterMirror=False, utilityMode=False):
-        cmd=MasterStart(name, dataDir, port, dbid, standbydbid, numContentsInCluster, era,
-                        wrapper, wrapper_args, specialMode, restrictedMode, timeout,
-                        max_connections, disableMasterMirror, utilityMode)
-        cmd.run(validateAfter=True)
-
 #-----------------------------------------------
 class MasterStop(Command):
     def __init__(self,name,dataDir,mode='smart',timeout=SEGMENT_TIMEOUT_DEFAULT, ctxt=LOCAL,remoteHost=None):
@@ -367,52 +227,6 @@ class MasterStop(Command):
         cmd.run(validateAfter=True)
 
 #-----------------------------------------------
-class SegmentStart(Command):
-    """
-    SegmentStart is used to start a single segment.
-
-    Note: Most code should probably use GpSegStartCmd instead which starts up
-    all of the segments on a specified GpHost.
-    """
-
-    def __init__(self, name, gpdb, numContentsInCluster, era, mirrormode,
-                 utilityMode=False, ctxt=LOCAL, remoteHost=None,
-                 noWait=False, timeout=SEGMENT_TIMEOUT_DEFAULT,
-                 specialMode=None, wrapper=None, wrapper_args=None):
-
-        # This is referenced from calling code
-        self.segment = gpdb
-
-        # Interesting data from our input segment
-        dbid    = gpdb.getSegmentDbId()
-        content = gpdb.getSegmentContentId()
-        port    = gpdb.getSegmentPort()
-        datadir = gpdb.getSegmentDataDirectory()
-
-        # build backend options
-        b = PgCtlBackendOptions(port, dbid, numContentsInCluster)
-        b.set_segment(mirrormode, content)
-        b.set_utility(utilityMode)
-        b.set_special(specialMode)
-
-        # build pg_ctl command
-        c = PgCtlStartArgs(datadir, b, era, wrapper, wrapper_args, not noWait, timeout)
-        self.cmdStr = str(c) + ' 2>&1'
-
-        Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
-
-    @staticmethod
-    def local(name, gpdb, numContentsInCluster, era, mirrormode, utilityMode=False):
-        cmd=SegmentStart(name, gpdb, numContentsInCluster, era, mirrormode, utilityMode)
-        cmd.run(validateAfter=True)
-
-    @staticmethod
-    def remote(name, remoteHost, gpdb, numContentsInCluster, era, mirrormode, utilityMode=False):
-        cmd=SegmentStart(name, gpdb, numContentsInCluster, era, mirrormode, utilityMode, ctxt=REMOTE, remoteHost=remoteHost)
-        cmd.run(validateAfter=True)
-
-
-#-----------------------------------------------
 class SendFilerepTransitionMessage(Command):
 
     # see gpmirrortransition.c and primary_mirror_transition_client.h
@@ -509,7 +323,7 @@ class SendFilerepVerifyMessage(Command):
 
     DEFAULT_IGNORE_FILES = [
         'pg_internal.init', 'pgstat.stat', 'pga_hba.conf',
-        'pg_ident.conf', 'pg_fsm.cache', 'gp_dbid', 'gp_pmtransitions_args',
+        'pg_ident.conf', 'pg_fsm.cache', 'gp_pmtransitions_args',
         'gp_dump', 'postgresql.conf', 'postmaster.log', 'postmaster.opts',
         'postmaser.pids', 'postgresql.conf.bak', 'core',  'wet_execute.tbl',
         'recovery.done', 'gp_temporary_files_filespace', 'gp_transaction_files_filespace']
@@ -665,51 +479,6 @@ class GpGetStatusUsingTransitionArgs(CmdArgs):
         self.set_segments(segments)
 
 
-class GpGetSegmentStatusValues(Command):
-    """
-    Fetch status values for segments on a host
-
-    Results will be a bin-hexed/pickled value that, when unpacked, will give a
-    two-level map:
-
-    outer-map maps from SEGMENT_STATUS__* value to inner-map
-    inner-map maps from dbid to result (which is usually a string, but may be different)
-
-    @param statusRequestArr an array of SEGMENT_STATUS__ constants
-    """
-    def __init__(self, name, segments, statusRequestArr, verbose=False, ctxt=LOCAL, remoteHost=None):
-
-        # clone the list
-        self.dblist = [x for x in segments]
-
-        # build gpgetstatususingtransition command
-        status_request = ":".join(statusRequestArr)
-        c = GpGetStatusUsingTransitionArgs(segments, status_request)
-        c.set_verbose(verbose)
-        cmdStr = str(c)
-
-        Command.__init__(self, name, cmdStr, ctxt, remoteHost)
-
-    def decodeResults(self):
-        """
-        return (warning,outputFromCmd) tuple, where if warning is None then
-           results were returned and outputFromCmd should be read.  Otherwise, the warning should
-           be logged and outputFromCmd ignored
-        """
-        if self.get_results().rc != 0:
-            return ("Error getting status from host %s" % self.remoteHost, None)
-
-        outputFromCmd = None
-        for line in self.get_results().stdout.split('\n'):
-            if line.startswith("STATUS_RESULTS:"):
-                toDecode = line[len("STATUS_RESULTS:"):]
-                outputFromCmd = pickle.loads(base64.urlsafe_b64decode(toDecode))
-                break
-        if outputFromCmd is None:
-            return ("No status output provided from host %s" % self.remoteHost, None)
-        return (None, outputFromCmd)
-
-
 SEGSTART_ERROR_UNKNOWN_ERROR = -1
 SEGSTART_SUCCESS = 0
 SEGSTART_ERROR_MIRRORING_FAILURE = 1
@@ -845,32 +614,6 @@ class GpSegStopCmd(Command):
 
         Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
 
-
-#-----------------------------------------------
-class GpStandbyStart(Command):
-    def __init__(self,name,datadir,port,ncontents,ctxt=LOCAL,
-                 remoteHost=None,dbid=None):
-        self.datadir=datadir
-        self.port=port
-        self.dbid=dbid
-        self.ncontents=ncontents
-        cmdStr="exec $GPHOME/sbin/gpstandbystart.sh %s %d %s %d" % \
-            (datadir,port,dbid,ncontents)
-        Command.__init__(self,name,cmdStr,ctxt,remoteHost)
-
-    @staticmethod
-    def remote(name,host,datadir,port,ncontents,dbid):
-        cmd=GpStandbyStart(name,datadir,port,ncontents,
-                           ctxt=REMOTE,remoteHost=host,dbid=dbid)
-        cmd.run(validateAfter=True)
-        return cmd
-
-    def validate(self, expected_rt=0):
-        if self.results.rc != 0:
-            raise ExecutionError("non-zero rc: %d" % self.results.rc, self)
-        if len(self.results.stderr) != 0 and (self.results.stderr.strip() != "FIPS mode initialized"):
-            raise ExecutionError("messages on stderr: %s" % self.results.stderr, self)
-
 #-----------------------------------------------
 class GpInitSystem(Command):
     def __init__(self,name,configFile,hostsFile, ctxt=LOCAL, remoteHost=None):
@@ -1052,89 +795,6 @@ class GpDumpDirsExist(Command):
         # This is > 1 because the command output will terminate with \n
         return dirCount > 1
 
-
-#-----------------------------------------------
-class ConfigureNewSegment(Command):
-    """
-    Configure a new segment, usually from a template, as is done during gpexpand, gpaddmirrors, gprecoverseg (full),
-      etc.
-    """
-
-    def __init__(self, name, confinfo, newSegments=False, tarFile=None,
-                 batchSize=None, verbose=False,ctxt=LOCAL, remoteHost=None, validationOnly=False, writeGpIdFileOnly=False):
-        cmdStr = '$GPHOME/bin/lib/gpconfigurenewsegment -c \"%s\"' % (confinfo)
-        if newSegments:
-            cmdStr += ' -n'
-        if tarFile:
-            cmdStr += ' -t %s' % tarFile
-        if verbose:
-            cmdStr += ' -v '
-        if batchSize:
-            cmdStr += ' -B %s' % batchSize
-        if validationOnly:
-            cmdStr += " --validation-only"
-        if writeGpIdFileOnly:
-            cmdStr += " --write-gpid-file-only"
-
-        Command.__init__(self, name, cmdStr, ctxt, remoteHost)
-
-    #-----------------------------------------------
-    @staticmethod
-    def buildSegmentInfoForNewSegment(segments, isTargetReusedLocationArr = None, primaryMirror = 'both'):
-        """
-        Build the new segment info that can be used to get the confinfo argument to pass to ConfigureNewSegment
-
-        @param segments list of segments
-
-        @param isTargetReusedLocationArr if not None, then is an array of boolean values in parallel with segments
-                                      True values indicate that the directory has been cleaned by gpcleansegmentdir.py
-                                      and we should have lighter restrictions on how to check it for emptiness
-                                      Passing None is the same as passing an array of all False values
-
-        @param primaryMirror Process 'primary' or 'mirror' or 'both'
-
-        @return A dictionary with the following format:
-
-                Name  =   <host name>
-                Value =   <system data directory>
-                        | <port>
-                        | if primary then 'true' else 'false'
-                        | if target is reused location then 'true' else 'false'
-                        | <segment dbid>
-                        | <content id>
-                      [ | <filespace oid> + <file space directory> ]...
-
-        """
-        result = {}
-        for segIndex, seg in enumerate(segments):
-            if primaryMirror == 'primary' and seg.isSegmentPrimary() == False:
-               continue
-            elif primaryMirror == 'mirror' and seg.isSegmentPrimary() == True:
-               continue
-            hostname = seg.getSegmentHostName()
-            if result.has_key(hostname):
-                result[hostname] += ','
-            else:
-                result[hostname] = ''
-
-            isTargetReusedLocation = isTargetReusedLocationArr and isTargetReusedLocationArr[segIndex]
-
-            filespaces = []
-            for fsOid, path in seg.getSegmentFilespaces().iteritems():
-                if fsOid not in [gparray.SYSTEM_FILESPACE]:
-                    filespaces.append(str(fsOid) + "+" + path)
-
-            result[hostname] += '%s|%d|%s|%s|%s|%d%s' % (seg.getSegmentDataDirectory(), seg.getSegmentPort(),
-                        "true" if seg.isSegmentPrimary(current_role=True) else "false",
-                        "true" if isTargetReusedLocation else "false",
-                        seg.getSegmentDbId(),
-                        seg.getSegmentContentId(),
-                        "" if len(filespaces) == 0 else ("|" + "|".join(filespaces))
-            )
-        return result
-
-
-
 #-----------------------------------------------
 class GpVersion(Command):
     def __init__(self,name,gphome,ctxt=LOCAL,remoteHost=None):
@@ -1335,7 +995,6 @@ def check_permissions(username):
     chk_gpdb_id(username)
 
 
-
 ######
 def standby_check(master_datadir):
     logger.debug("---Make sure we aren't a passive standby master")
@@ -1343,35 +1002,6 @@ def standby_check(master_datadir):
         raise GpError('Cannot run this utility on the standby instance')
 
 
-
-#=-=-=-=-=-=-=-=-=-= Bash Migration Helper Functions =-=-=-=-=-=-=-=-
-
-def start_standbymaster(host,datadir,port,dbid,ncontents):
-    logger.info("Starting standby master")
-
-    logger.info("Checking if standby master is running on host: %s  in directory: %s" % (host,datadir))
-    res = recovery_startup(datadir)
-
-    if res:
-        logger.warning("Unable to cleanup previously started standby: '%s'" % res)
-
-    #create a pg_log directory if necessary
-    CreateDirIfNecessary.remote('create standby logdir if needed', host, datadir + "/pg_log")
-
-
-    cmd=GpStandbyStart.remote('start standby master',host,datadir,port,
-                              ncontents,dbid)
-    logger.debug("Starting standby: %s" % cmd )
-
-    logger.debug("Starting standby master results: %s" % cmd.get_results() )
-
-    if cmd.get_results().rc == 0:
-        return True
-    else:
-        return False
-
-    pass
-
 ######
 def recovery_startup(datadir):
     """ investigate a db that may still be running """
@@ -1580,36 +1210,6 @@ def createTempDirectoryName(masterDataDirectory, tempDirPrefix):
 
 
 #-------------------------------------------------------------------------
-# gp_dbid methods moved to gp_dbid.py, but this class was left here
-# to avoid changing gpmigrator and gpmigrator_mirror (which is the only caller).
-#
-
-class GpCreateDBIdFile(Command):
-    def __init__(self, name, directory, dbid, verbose=False, ctxt=LOCAL, remoteHost=None):
-        if verbose:
-            setverbose="-v"
-        else:
-            setverbose=""
-        args = [
-            "$GPHOME/sbin/gpsetdbid.py",
-            "-d %s" % directory,
-            "-i %s" % dbid,
-            setverbose,
-            ]
-        cmdStr = " ".join(args)
-        Command.__init__(self, name, cmdStr, ctxt, remoteHost)
-
-    @staticmethod
-    def local(name, directory, dbid):
-        cmd = GpCreateDBIdFile(name, directory, dbid)
-        cmd.run(validateAfter=True)
-
-    @staticmethod
-    def remote(name, remoteHost, directory, dbid):
-        cmd = GpCreateDBIdFile(name, directory, dbid, ctxt=REMOTE, remoteHost=remoteHost)
-        cmd.run(validateAfter=True)
-
-#-------------------------------------------------------------------------
 class GpRecoverSeg(Command):
    """
    This command will execute the gprecoverseg utility

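Note: the deleted buildSegmentInfoForNewSegment documented a pipe-delimited
per-host "confinfo" format. A sketch that rebuilds one entry from that
documented format (the flat Seg record below is hypothetical, standing in
for a gparray segment object):

    from collections import namedtuple

    Seg = namedtuple("Seg", "datadir port is_primary reused dbid content filespaces")

    def confinfo_entry(seg):
        # <datadir>|<port>|<primary?>|<reused?>|<dbid>|<content>[|<oid>+<path>]...
        fs = "".join("|%s+%s" % (oid, path) for oid, path in seg.filespaces)
        return "%s|%d|%s|%s|%s|%d%s" % (
            seg.datadir, seg.port,
            "true" if seg.is_primary else "false",
            "true" if seg.reused else "false",
            seg.dbid, seg.content, fs)

    print(confinfo_entry(Seg("/data/seg0", 40000, True, False, 2, 0, [])))
    # prints: /data/seg0|40000|true|false|2|0
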
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/tools/bin/gppylib/gp_contentnum.py
----------------------------------------------------------------------
diff --git a/tools/bin/gppylib/gp_contentnum.py b/tools/bin/gppylib/gp_contentnum.py
deleted file mode 100644
index 6e68df9..0000000
--- a/tools/bin/gppylib/gp_contentnum.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env python
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# Line too long - pylint: disable=C0301
-# Invalid name  - pylint: disable=C0103
-
-"""
-  gp_contentnum.py
-"""
-
-import re
-import os, stat
-
-class DummyLogger:
-    def info(self, msg):  pass
-    def debug(self, msg): pass
-
-CONTENTNUM_RE         = re.compile(r"content_num\s*=\s*(\d+)")
-
-class GpContentnumFile:
-    """
-    Used by gpstart, gpinitstandby, and gpactivatestandby to
-    manage the gp_contentnum file.
-    """
-
-    def __init__(self, datadir, do_read=False, logger=None):
-        """
-        Initialize path to gp_contentnum file and reset values.
-        Log subsequent activity using specified logger and
-        if do_read is True, immediately attempt to read values.
-        """
-        self.datadir      = datadir
-        self.logger       = logger or DummyLogger()
-        self.filepath     = os.path.join(self.datadir, 'gp_contentnum')
-        self.contentnum   = None
-
-        if do_read:
-            self.read_gp_contentnum()
-
-
-    def read_gp_contentnum(self):
-        """
-        Open the gp_contentnum file and parse its contents.
-        """
-        INFO = self.logger.info
-        INFO('%s - read_gp_contentnum' % self.filepath)
-
-        with open(self.filepath) as f:
-            self.parse(f)
-
-
-    def parse(self, f):
-        """
-        Parse f, looking for matching contentnum expressions and
-        ignoring all other lines.  Assigns contentnum to observed 
-        values, converting matched values from strings to integers.  
-        """
-        INFO  = self.logger.info
-        DEBUG = self.logger.debug
-
-        self.contentnum  = None
-        for line in f:
-            line = line.strip()
-            DEBUG('parse: %s' % line)
-
-            m = re.match(CONTENTNUM_RE, line)
-            if m:
-                self.contentnum = int(m.group(1))
-                INFO('match contentnum: %d' % self.contentnum)
-
-        assert self.contentnum is not None
-
-
-    def format(self, f):
-        """
-        Generate gp_contentnum contents based on contentnum values
-        """
-        INFO  = self.logger.info
-
-        f.write("# Greenplum Database content num for master/standby.\n")
-        f.write("# Do not change the contents of this file.\n")
-        f.write('contentnum = %d\n' % self.contentnum)
-        INFO('wrote contentnum: %d' % self.contentnum)
- 
-
-    def write_gp_contentnum(self):
-        """
-        Create or replace gp_contentnum file with current values, changing
-        permissions of the new file when done and verifying by re-reading
-        the file contents and checking the values read match desired values
-        """
-        INFO  = self.logger.info
-        INFO('%s - write_gp_contentnum' % self.filepath)
-
-        if os.path.exists(self.filepath):
-            INFO('found existing file')
-
-            os.remove(self.filepath)
-            INFO('removed existing file')
-
-        self.logger.info('opening new file')
-        with open(self.filepath, 'w') as f:
-            self.format(f)
-
-        INFO('setting read only')
-        os.chmod(self.filepath, stat.S_IRUSR)  # user read permissions (0400)
-
-        INFO('verifying file')
-        v = GpContentnumFile(self.datadir, do_read=True)
-        assert self.contentnum == v.contentnum
-
-
-def writeGpContentnumFile(directory, contentnum, logger=None):
-    """
-    Writes the gp_contentnum file to the given directory, marking it as for the given contentnum.
-    This method may be deprecated.  See comments in CR-2806.
-    """
-    d = GpContentnumFile(directory, logger=logger)
-    d.contentnum = contentnum
-    d.write_gp_contentnum()
-
-
-
-
-#
-# trivial unit test
-#
-if __name__ == '__main__':
-    import copy, shutil
-    import unittest2 as unittest
-
-    TESTDIR = 'test_gp_contentnum1'
-
-    class MyTestCase(unittest.TestCase):
-        def test1(self):
-            d = GpContentnumFile(TESTDIR)
-            d2 = copy.copy(d)
-            d.contentnum = 10
-            d.write_gp_contentnum()
-            d2.read_gp_contentnum()
-            assert d.contentnum == d2.contentnum
-       
-    if os.path.exists(TESTDIR): shutil.rmtree(TESTDIR)
-    os.mkdir(TESTDIR)
-    unittest.main()
-    shutil.rmtree(TESTDIR)

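Note: the deleted module's CONTENTNUM_RE expected the key "content_num",
while its own format() wrote "contentnum", so the verify-on-write re-read
appears never to have matched. A tolerant standalone reader for any legacy
files (a sketch, not part of this commit) accepts both spellings:

    import os, re

    # Accept both the key the deleted parser expected ("content_num") and
    # the key its format() actually wrote ("contentnum").
    CONTENTNUM_RE = re.compile(r"content_?num\s*=\s*(\d+)")

    def read_contentnum(datadir):
        with open(os.path.join(datadir, "gp_contentnum")) as f:
            for line in f:
                m = CONTENTNUM_RE.match(line.strip())
                if m:
                    return int(m.group(1))
        return None
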
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/tools/bin/gppylib/gp_dbid.py
----------------------------------------------------------------------
diff --git a/tools/bin/gppylib/gp_dbid.py b/tools/bin/gppylib/gp_dbid.py
deleted file mode 100644
index 54d79aa..0000000
--- a/tools/bin/gppylib/gp_dbid.py
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/env python
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# Line too long - pylint: disable=C0301
-# Invalid name  - pylint: disable=C0103
-
-"""
-  gp_dbid.py
-"""
-
-import re
-import os, stat
-
-class DummyLogger:
-    def info(self, msg):  pass
-    def debug(self, msg): pass
-
-DBID_RE         = re.compile(r"dbid\s*=\s*(\d+)")
-STANDBY_DBID_RE = re.compile(r"standby_dbid\s*=\s*(\d+)")
-
-class GpDbidFile:
-    """
-    Used by gpstart, gpinitstandby, gpactivatestandby and indirectly
-    by gpmigrator via gpsetdbid.py to manage the gp_dbid file.
-    """
-
-    def __init__(self, datadir, do_read=False, logger=None):
-        """
-        Initialize path to gp_dbid file and reset values.
-        Log subsequent activity using specified logger and
-        if do_read is True, immediately attempt to read values.
-        """
-        self.datadir      = datadir
-        self.logger       = logger or DummyLogger()
-        self.filepath     = os.path.join(self.datadir, 'gp_dbid')
-        self.dbid         = None
-        self.standby_dbid = None
-
-        if do_read:
-            self.read_gp_dbid()
-
-
-    def read_gp_dbid(self):
-        """
-        Open the gp_dbid file and parse its contents.
-        """
-        INFO = self.logger.info
-        INFO('%s - read_gp_dbid' % self.filepath)
-
-        with open(self.filepath) as f:
-            self.parse(f)
-
-
-    def parse(self, f):
-        """
-        Parse f, looking for matching dbid and standby_dbid expressions and
-        ignoring all other lines.  Assigns dbid and/or standby_dbid to observed 
-        values, converting matched values from strings to integers.  
-        """
-        INFO  = self.logger.info
-        DEBUG = self.logger.debug
-
-        self.dbid         = None
-        self.standby_dbid = None
-        for line in f:
-            line = line.strip()
-            DEBUG('parse: %s' % line)
-
-            m = re.match(DBID_RE, line)
-            if m:
-                self.dbid = int(m.group(1))
-                INFO('match dbid: %d' % self.dbid)
-
-            m = re.match(STANDBY_DBID_RE, line)
-            if m:
-                self.standby_dbid = int(m.group(1))
-                INFO('match standby_dbid: %d' % self.standby_dbid)
-
-        assert self.dbid is not None
-
-
-    def format(self, f):
-        """
-        Generate gp_dbid contents based on dbid and standby_dbid values
-        """
-        INFO  = self.logger.info
-
-        f.write("# Greenplum Database identifier for this master/segment.\n")
-        f.write("# Do not change the contents of this file.\n")
-        f.write('dbid = %d\n' % self.dbid)
-        INFO('wrote dbid: %d' % self.dbid)
-
-        if self.standby_dbid:
-            f.write('standby_dbid = %d\n' % self.standby_dbid)
-            INFO('wrote standby_dbid: %d' % self.standby_dbid)
-
- 
-    def write_gp_dbid(self):
-        """
-        Create or replace gp_dbid file with current values, changing
-        permissions of the new file when done and verifying by re-reading
-        the file contents and checking the values read match desired values
-        """
-        INFO  = self.logger.info
-        INFO('%s - write_gp_dbid' % self.filepath)
-
-        if os.path.exists(self.filepath):
-            INFO('found existing file')
-
-            os.remove(self.filepath)
-            INFO('removed existing file')
-
-        self.logger.info('opening new file')
-        with open(self.filepath, 'w') as f:
-            self.format(f)
-
-        INFO('setting read only')
-        os.chmod(self.filepath, stat.S_IRUSR)  # user read permissions (0400)
-
-        INFO('verifying file')
-        v = GpDbidFile(self.datadir, do_read=True)
-        assert self.dbid == v.dbid
-        assert self.standby_dbid == v.standby_dbid
-
-
-def writeGpDbidFile(directory, dbid, logger=None):
-    """
-    Writes the gp_dbid file to the given directory, marking it as for the given dbid.
-    This method may be deprecated.  See comments in CR-2806.
-    """
-    d = GpDbidFile(directory, logger=logger)
-    d.dbid = dbid
-    d.write_gp_dbid()
-
-
-
-
-#
-# trivial unit test
-#
-if __name__ == '__main__':
-    import copy, shutil
-    import unittest2 as unittest
-
-    TESTDIR = 'test_gp_dbid1'
-
-    class MyTestCase(unittest.TestCase):
-        def test1(self):
-            d = GpDbidFile(TESTDIR)
-            d2 = copy.copy(d)
-            d.dbid = 10
-            d.write_gp_dbid()
-            d2.read_gp_dbid()
-            assert d.dbid == d2.dbid
-            assert d.standby_dbid == d2.standby_dbid
-       
-    if os.path.exists(TESTDIR): shutil.rmtree(TESTDIR)
-    os.mkdir(TESTDIR)
-    unittest.main()
-    shutil.rmtree(TESTDIR)

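Note: tooling that still needs to produce a legacy gp_dbid file after this
deletion can mirror the deleted module's format() and permission handling;
a standalone sketch:

    import os, stat

    def write_gp_dbid(datadir, dbid, standby_dbid=None):
        # Mirrors the deleted format(): two comment lines, then key = value.
        path = os.path.join(datadir, "gp_dbid")
        if os.path.exists(path):
            os.remove(path)           # the previous file is read-only (0400)
        with open(path, "w") as f:
            f.write("# Greenplum Database identifier for this master/segment.\n")
            f.write("# Do not change the contents of this file.\n")
            f.write("dbid = %d\n" % dbid)
            if standby_dbid:
                f.write("standby_dbid = %d\n" % standby_dbid)
        os.chmod(path, stat.S_IRUSR)  # user read only, as the module did
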
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/tools/bin/gppylib/gp_era.py
----------------------------------------------------------------------
diff --git a/tools/bin/gppylib/gp_era.py b/tools/bin/gppylib/gp_era.py
index be136f2..f23cd3d 100644
--- a/tools/bin/gppylib/gp_era.py
+++ b/tools/bin/gppylib/gp_era.py
@@ -19,7 +19,7 @@
 # Invalid name  - pylint: disable=C0103
 
 """
-  gp_era.py, based on gp_dbid.py
+  gp_era.py
 """
 
 import sys, os, stat, re

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/tools/bin/gppylib/gparray.py
----------------------------------------------------------------------
diff --git a/tools/bin/gppylib/gparray.py b/tools/bin/gppylib/gparray.py
index e793f86..262de36 100755
--- a/tools/bin/gppylib/gparray.py
+++ b/tools/bin/gppylib/gparray.py
@@ -476,9 +476,6 @@ class GpDB:
         rmCmd.run(validateAfter = True)
         res = rmCmd.get_results()
 
-        # Remove the gp_dbid file from the data dir
-        RemoveFiles.local('Remove gp_dbid file', os.path.normpath(dstDir + '/gp_dbid'))
-        logger.info("Cleaning up catalog for schema only copy on destination")
         # We need 700 permissions or postgres won't start
         Chmod.local('set template permissions', dstDir, '0700')
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c90ba7cf/tools/bin/gppylib/operations/buildMirrorSegments.py
----------------------------------------------------------------------
diff --git a/tools/bin/gppylib/operations/buildMirrorSegments.py b/tools/bin/gppylib/operations/buildMirrorSegments.py
deleted file mode 100644
index 813e4d9..0000000
--- a/tools/bin/gppylib/operations/buildMirrorSegments.py
+++ /dev/null
@@ -1,860 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-import os
-import pickle
-import signal
-import time
-
-from gppylib.mainUtils import *
-
-from gppylib.utils import checkNotNone, appendNewEntriesToHbaFile
-from gppylib.db import dbconn
-from gppylib import gparray, gplog
-from gppylib.gplog import *
-from gppylib.commands import unix
-from gppylib.commands import gp
-from gppylib.commands import base
-from gppylib.gparray import GpArray
-from gppylib import gphostcache
-from gppylib.testold.testUtils import *
-from gppylib.operations import startSegments
-from gppylib.operations.filespace import template_temporary_directories
-from gppylib.gp_era import read_era
-from gppylib.operations.utils import ParallelOperation
-from gppylib.operations.filespace import PG_SYSTEM_FILESPACE, GP_TRANSACTION_FILES_FILESPACE, GP_TEMPORARY_FILES_FILESPACE, GetMoveOperationList, GetFilespaceEntriesDict, GetFilespaceEntries, GetCurrentFilespaceEntries, RollBackFilespaceChanges, UpdateFlatFiles, FileType, MoveFilespaceError
-
-logger = get_default_logger()
-
-gDatabaseDirectories = [
-        # this list and the gDatabaseSubDirectories occur combined inside initdb.c
-        "global",
-        "pg_log",
-        "pg_xlog",
-        "pg_clog",
-        "pg_changetracking",
-        "pg_subtrans",
-        "pg_twophase",
-        "pg_multixact",
-        "pg_distributedxidmap",
-        "pg_distributedlog",
-        "pg_utilitymodedtmredo",
-        "base",
-        "pg_tblspc",
-        "pg_stat_tmp"
-        ]
-gDatabaseSubDirectories = [
-        "pg_xlog/archive_status",
-        "pg_multixact/members",
-        "pg_multixact/offsets",
-        "base/1"
-        ]
-
-#
-# Database files that may exist in the root directory and need deleting
-#
-gDatabaseFiles = [
-    "PG_VERSION",
-    "pg_hba.conf",
-    "pg_ident.conf",
-    "postgresql.conf",
-    "postmaster.log",
-    "postmaster.opts",
-    "postmaster.pid",
-    "gp_dbid"
-        ]
-
-def MPP_12038_fault_injection():
-    """This function will check for the environment variable
-    GP_MPP_12038 and if it is set will sleep for 2 * gp_fts_probe_interval.
-    This is used in this module to check interaction with the FTS prober and
-    should only be used for testing.  Note this delay is long enough for a
-    small test installation but would likely not be long enough for a large
-    cluster."""
-    if os.getenv("GP_MPP_12038_INJECT_DELAY", None):
-        faultProber = faultProberInterface.getFaultProber()
-        probe_interval_secs = faultProber.getFaultProberInterval()
-        logger.info("Sleeping for %d seconds for MPP-12038 test..." % (probe_interval_secs * 2))
-        time.sleep(probe_interval_secs * 2)
-
-#
-# note: it's a little quirky that caller must set up failed/failover so that failover is in gparray but
-#                                 failed is not (if both set)...change that, or at least protect against problems
-#
-
-class GpMirrorToBuild:
-
-    def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization):
-        checkNotNone("liveSegment", liveSegment)
-        checkNotNone("forceFullSynchronization", forceFullSynchronization)
-
-        if failedSegment is None and failoverSegment is None:
-            raise Exception( "No mirror passed to GpMirrorToBuild")
-
-        if not liveSegment.isSegmentQE():
-            raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a correct segment " \
-                    "(it is a master or standby master)" % liveSegment.getSegmentContentId())
-        if not liveSegment.isSegmentPrimary(True):
-            raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
-        if not liveSegment.isSegmentUp():
-            raise ExceptionNoStackTraceNeeded("Primary segment is not up for content %s" % liveSegment.getSegmentContentId())
-
-        if failedSegment is not None:
-            if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
-                raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same.  " \
-                                    "A segment may not be recovered from itself" % liveSegment.getSegmentDbId())
-
-        if failoverSegment is not None:
-            if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
-                raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same.  " \
-                                    "A segment may not be built from itself" % liveSegment.getSegmentDbId())
-
-        if failedSegment is not None and failoverSegment is not None:
-            # for now, we require the code to have produced this -- even when moving the segment to another
-            #  location, we preserve the directory
-            assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()
-
-        self.__failedSegment = failedSegment
-        self.__liveSegment = liveSegment
-        self.__failoverSegment = failoverSegment
-
-        """
-        __forceFullSynchronization is true if full resynchronization should be FORCED -- that is, the
-           existing segment will be cleared and all objects will be transferred by the file resynchronization
-           process on the server
-        """
-        self.__forceFullSynchronization = forceFullSynchronization
-
-
-    def getFailedSegment(self):
-        """
-        returns the segment that failed. This can be None, for example when adding mirrors
-        """
-        return self.__failedSegment
-
-    def getLiveSegment(self):
-        """
-        returns the primary segment from which the recovery will take place.  Will always be non-None
-        """
-        return self.__liveSegment
-
-    def getFailoverSegment(self):
-        """
-        returns the target segment to which we will copy the data, or None
-            if we will recover in place.  Note that __failoverSegment should refer to the same dbid
-            as __failedSegment, but should have updated path + file information.
-        """
-        return self.__failoverSegment
-
-    def isFullSynchronization(self):
-        """
-        Returns whether or not this segment to recover needs to recover using full resynchronization
-        """
-
-        if self.__forceFullSynchronization:
-            return True
-
-        # if we are failing over to a new segment location then we must fully resync
-        if self.__failoverSegment is not None:
-            return True
-
-        return False
-
-class GpMirrorListToBuild:
-    def __init__(self, toBuild, pool, quiet, parallelDegree, additionalWarnings=None):
-        self.__mirrorsToBuild = toBuild
-        self.__pool = pool
-        self.__quiet = quiet
-        self.__parallelDegree = parallelDegree
-        self.__additionalWarnings = additionalWarnings or []
-
-    def getMirrorsToBuild(self):
-        """
-        Returns a newly allocated list
-        """
-        return [m for m in self.__mirrorsToBuild]
-
-    def getAdditionalWarnings(self):
-        """
-        Returns any additional warnings generated during building of list
-        """
-        return self.__additionalWarnings
-
-    def __moveFilespaces(self, gparray, target_segment):
-        """
-            Moves filespaces for temporary and transaction files to a particular location.
-        """
-        master_seg = gparray.master
-        default_filespace_dir = master_seg.getSegmentDataDirectory()
-
-        cur_filespace_entries = GetFilespaceEntriesDict(GetFilespaceEntries(gparray,
-                                                                            PG_SYSTEM_FILESPACE).run()).run()
-        pg_system_filespace_entries = GetFilespaceEntriesDict(GetFilespaceEntries(gparray,
-                                                                                  PG_SYSTEM_FILESPACE).run()).run()
-        cur_filespace_name = gparray.getFileSpaceName(int(cur_filespace_entries[1][0]))
-        segments = [target_segment] + [seg for seg in gparray.getDbList() if seg.getSegmentContentId() == target_segment.getSegmentContentId() and seg.getSegmentDbId() != target_segment.getSegmentDbId()]
-
-        logger.info('Starting file move procedure for %s' % target_segment)
-
-        if os.path.exists(os.path.join(default_filespace_dir, GP_TRANSACTION_FILES_FILESPACE)):
-            #On the expansion segments, the current filespace used by existing nodes will be the
-            #new filespace to which we want to move the transaction and temp files.
-            #The filespace directories which have to be moved will be the default pg_system directories.
-            new_filespace_entries = GetFilespaceEntriesDict(GetCurrentFilespaceEntries(gparray,
-                                                                                FileType.TRANSACTION_FILES).run()).run()
-            logger.info('getting filespace information')
-            new_filespace_name = gparray.getFileSpaceName(int(new_filespace_entries[1][0]))
-            logger.info('getting move operations list for filespace %s' % new_filespace_name)
-            operation_list = GetMoveOperationList(segments,
-                                                  FileType.TRANSACTION_FILES,
-                                                  new_filespace_name,
-                                                  new_filespace_entries,
-                                                  cur_filespace_entries,
-                                                  pg_system_filespace_entries).run()
-            logger.info('Starting transaction files move')
-            ParallelOperation(operation_list).run()
-
-            logger.debug('Checking transaction files move')
-            try:
-                for operation in operation_list:
-                    operation.get_ret()
-                    pass
-            except Exception, e:
-                logger.info('Failed to move transaction filespace. Rolling back changes ...')
-                RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
-                                        FileType.TRANSACTION_FILES,
-                                        cur_filespace_name,
-                                        cur_filespace_entries,
-                                        new_filespace_entries,
-                                        pg_system_filespace_entries).run()
-                raise
-
-        if os.path.exists(os.path.join(default_filespace_dir, GP_TEMPORARY_FILES_FILESPACE)):
-            new_filespace_entries = GetFilespaceEntriesDict(GetCurrentFilespaceEntries(gparray,
-                                                                                       FileType.TEMPORARY_FILES).run()).run()
-            new_filespace_name = gparray.getFileSpaceName(int(new_filespace_entries[1][0]))
-            operation_list = GetMoveOperationList(segments,
-                                                  FileType.TEMPORARY_FILES,
-                                                  new_filespace_name,
-                                                  new_filespace_entries,
-                                                  cur_filespace_entries,
-                                                  pg_system_filespace_entries).run()
-            logger.info('Starting temporary files move')
-            ParallelOperation(operation_list).run()
-
-            logger.debug('Checking temporary files move')
-            try:
-                for operation in operation_list:
-                    operation.get_ret()
-            except Exception:
-                logger.info('Failed to move temporary filespace. Rolling back changes ...')
-                RollBackFilespaceChanges(gparray.getExpansionSegDbList(),
-                                        FileType.TEMPORARY_FILES,
-                                        cur_filespace_name,
-                                        cur_filespace_entries,
-                                        new_filespace_entries,
-                                        pg_system_filespace_entries).run()
-                raise
-
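-    # Illustrative sketch (not part of this module): __moveFilespaces above fans
-    #  the per-segment move operations out in parallel and rolls everything back
-    #  if any of them failed. The helper below shows that fan-out/check/rollback
-    #  pattern in miniature, with plain threads standing in for ParallelOperation;
-    #  the (name, op) pairs and the undo callback are hypothetical stand-ins.
-    def _sketch_parallel_move_with_rollback(operations, undo):
-        import threading
-        results = {}
-
-        def wrap(name, op):
-            def run():
-                op()                  # a real operation moves files and may raise
-                results[name] = True  # only recorded if op() did not raise
-            return run
-
-        threads = [threading.Thread(target=wrap(name, op)) for name, op in operations]
-        for t in threads:
-            t.start()
-        for t in threads:
-            t.join()
-
-        # verify every operation completed; undo the completed ones otherwise
-        if len(results) != len(operations):
-            for name in results:
-                undo(name)            # analogous to RollBackFilespaceChanges
-            raise Exception("%d of %d moves failed; changes rolled back"
-                            % (len(operations) - len(results), len(operations)))
-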
-    def buildMirrors(self, actionName, gpEnv, gpArray):
-        """
-        Build the mirrors.
-
-        gpArray must have already been altered to have updated directories -- that is, the failoverSegments
-            from the mirrorsToBuild must be present in gpArray.
-
-        """
-        testOutput("building %s segment(s)" % len(self.__mirrorsToBuild))
-
-        if len(self.__mirrorsToBuild) == 0:
-            logger.info("No segments to " + actionName)
-            return
-
-        self.checkForPortAndDirectoryConflicts(gpArray)
-
-        logger.info("%s segment(s) to %s" % (len(self.__mirrorsToBuild), actionName))
-
-        self.__verifyGpArrayContents(gpArray)
-
-        # make sure the target directories are up-to-date
-        #  by cleaning them, if needed, and then copying a basic directory there
-        #  the postgresql.conf in that basic directory will need updating (to change the port)
-        toStopDirectives = []
-        toEnsureMarkedDown = []
-        cleanupDirectives = []
-        copyDirectives = []
-        for toRecover in self.__mirrorsToBuild:
-
-            if toRecover.getFailedSegment() is not None:
-                # will stop the failed segment.  Note that we do this even if we are recovering to a different location!
-                toStopDirectives.append(GpStopSegmentDirectoryDirective(toRecover.getFailedSegment()))
-                if toRecover.getFailedSegment().getSegmentStatus() == gparray.STATUS_UP:
-                    toEnsureMarkedDown.append(toRecover.getFailedSegment())
-
-            if toRecover.isFullSynchronization():
-                isTargetReusedLocation = False
-                if toRecover.getFailedSegment() is not None and \
-                    toRecover.getFailoverSegment() is None:
-                    #
-                    # We are recovering a failed segment in-place
-                    #
-                    cleanupDirectives.append(GpCleanupSegmentDirectoryDirective(toRecover.getFailedSegment()))
-                    isTargetReusedLocation = True
-
-                if toRecover.getFailoverSegment() is not None:
-                    targetSegment = toRecover.getFailoverSegment()
-                else:
-                    targetSegment = toRecover.getFailedSegment()
-
-                d = GpCopySegmentDirectoryDirective(toRecover.getLiveSegment(), targetSegment, isTargetReusedLocation)
-                copyDirectives.append(d)
-
-        self.__ensureStopped(gpEnv, toStopDirectives)
-        self.__ensureMarkedDown(gpEnv, toEnsureMarkedDown)
-        self.__cleanUpSegmentDirectories(cleanupDirectives)
-        self.__copySegmentDirectories(gpEnv, gpArray, copyDirectives)
-        # Move the filespaces for transaction and temporary files
-        for toRecover in self.__mirrorsToBuild:
-            target_segment = None
-
-            if toRecover.getFailoverSegment() is not None:
-                target_segment = toRecover.getFailoverSegment()
-            elif toRecover.isFullSynchronization():
-                target_segment = toRecover.getFailedSegment()
-
-            if target_segment is not None:
-                self.__moveFilespaces(gpArray, target_segment)
-
-        # If we are adding mirrors, we need to update the flat files on the primaries as well
-        if actionName == "add":
-            try:
-                UpdateFlatFiles(gpArray, primaries=True).run()
-            except MoveFilespaceError, e:
-                logger.error(str(e))
-                raise
-        else:
-            try:
-                logger.info('Updating flat files')
-                UpdateFlatFiles(gpArray, primaries=False).run()
-            except MoveFilespaceError, e:
-                logger.error(str(e))
-                raise
-
-        # update and save metadata in memory
-        for toRecover in self.__mirrorsToBuild:
-
-            if toRecover.getFailoverSegment() is None:
-                # we are recovering the lost segment in place
-                seg = toRecover.getFailedSegment()
-            else:
-                # we are failing over to a new segment; the failed segment's
-                #   information is overwritten in the configuration by the
-                #   failover segment, so it must no longer appear in gpArray
-                failedSeg = toRecover.getFailedSegment()
-                for gpArraySegment in gpArray.getDbList():
-                    if gpArraySegment is failedSeg:
-                        raise Exception("failed segment should not be in the new configuration if failing over to new segment")
-
-                seg = toRecover.getFailoverSegment()
-            seg.setSegmentStatus(gparray.STATUS_DOWN) # down initially, we haven't started it yet
-            if not seg.isSegmentQE():
-                seg.setSegmentMode(gparray.MODE_RESYNCHRONIZATION)
-
-        # figure out what needs to be started or transitioned
-        setToRestart = []
-        fullResyncMirrorDbIds = {}
-        for toRecover in self.__mirrorsToBuild:
-            seg = toRecover.getFailoverSegment()
-            if seg is None:
-                seg = toRecover.getFailedSegment() # we are recovering in place
-            setToRestart.append(seg)
-
-            if toRecover.isFullSynchronization() and seg.getSegmentDbId() > 0:
-                fullResyncMirrorDbIds[seg.getSegmentDbId()] = True
-
-        # ideally this would use mainUtils.getProgramName, but that does not work here
-        programName = os.path.split(sys.argv[0])[-1]
-
-        # Disable Ctrl-C, going to save metadata in database and transition segments
-        signal.signal(signal.SIGINT,signal.SIG_IGN)
-        try:
-            if gpArray.getFaultStrategy() == gparray.FAULT_STRATEGY_NONE:
-                logger.info("HAWQ recovery")
-
-                # Update the catalog to reflect that recovery brought these
-                # segments up on a new host.
-                if len(fullResyncMirrorDbIds) > 0:
-                    pass
-
-                logger.info("Starting failover segments")
-                self.__startAll(gpEnv, gpArray, setToRestart)
-
-                logger.info("Updating configuration to mark segments up")
-                for seg in setToRestart:
-                    seg.setSegmentStatus(gparray.STATUS_UP)
-
-                configInterface.getConfigurationProvider().updateSystemConfig(
-                    gpArray,
-                    "%s: segment recovery" % programName,
-                    dbIdToForceMirrorRemoveAdd = {},
-                    useUtilityMode = True,
-                    allowPrimary = False
-                )
-
-                # the finally block below restores the SIGINT handler on return
-                return
-
-        finally:
-            # Reenable Ctrl-C
-            signal.signal(signal.SIGINT,signal.default_int_handler)
-
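-    # Illustrative sketch (not part of this module): buildMirrors guards its
-    #  metadata update with a SIGINT block, restoring the handler in a finally
-    #  clause, which runs on return as well as on exception. A condensed,
-    #  self-contained version of that guard, where work() is a hypothetical
-    #  stand-in for the updateSystemConfig call:
-    def _sketch_sigint_guard(work):
-        import signal
-        old_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
-        try:
-            return work()
-        finally:
-            # runs on both the return above and any exception from work()
-            signal.signal(signal.SIGINT, old_handler)
-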
-    def __verifyGpArrayContents(self, gpArray):
-        """
-        Run some simple assertions against gpArray contents
-        """
-        for seg in gpArray.getDbList():
-            if seg.getSegmentDataDirectory() != seg.getSegmentFilespaces()[gparray.SYSTEM_FILESPACE]:
-                raise Exception("Mismatch between segment data directory and filespace entry for segment %s" %
-                            seg.getSegmentDbId())
-
-    def checkForPortAndDirectoryConflicts(self, gpArray):
-        """
-        Check gpArray for internal consistency -- no duplicate ports or directories on the same host, for example
-
-        A detected problem causes an Exception to be raised
-        """
-
-        for hostName, segmentArr in GpArray.getSegmentsByHostName(gpArray.getDbList()).iteritems():
-            usedPorts = {}
-            usedDataDirectories = {}
-            for segment in segmentArr:
-
-                # check for port conflict
-                replicationPort = segment.getSegmentReplicationPort()
-                port = segment.getSegmentPort()
-                dbid = segment.getSegmentDbId()
-                if port in usedPorts:
-                    raise Exception("On host %s, a port for segment with dbid %s conflicts with a port for segment dbid %s" \
-                            % (hostName, dbid, usedPorts.get(port)))
-
-                if segment.isSegmentQE():
-                    # a QE segment must not have a replication port configured
-                    if replicationPort is not None:
-                        raise Exception("On host %s, the replication port is set for segment with dbid %s" \
-                                % (hostName, dbid))
-
-                usedPorts[port] = dbid
-                if replicationPort is not None:
-                    usedPorts[replicationPort] = dbid
-
-                # check for directory conflict; could improve this by reporting
-                #  the conflicts more nicely
-                paths = [path for oid, path in segment.getSegmentFilespaces().items() if oid != gparray.SYSTEM_FILESPACE]
-                paths.append(segment.getSegmentDataDirectory())
-
-                for path in paths:
-                    # NOTE: this directory conflict check is currently disabled
-                    #  ("and False" below); the bookkeeping is kept so the check
-                    #  can be re-enabled easily
-                    if path in usedDataDirectories and False:
-                        raise Exception("On host %s, directory (base or filespace) for segment with dbid %s conflicts with a " \
-                                        "directory (base or filespace) for segment dbid %s; directory: %s" % \
-                                        (hostName, dbid, usedDataDirectories.get(path), path))
-                    usedDataDirectories[path] = dbid
-
-
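-    # Illustrative sketch (not part of this module): the conflict detection above
-    #  is a first-claim-wins dict from a resource (port or directory) to the dbid
-    #  that claimed it. A standalone version, with made-up (port, dbid) pairs:
-    def _sketch_port_conflict_check(claims):
-        usedPorts = {}
-        for port, dbid in claims:
-            if port in usedPorts:
-                raise Exception("port %s of dbid %s conflicts with dbid %s"
-                                % (port, dbid, usedPorts[port]))
-            usedPorts[port] = dbid
-
-    # _sketch_port_conflict_check([(40000, 2), (40001, 3)])   # passes
-    # _sketch_port_conflict_check([(40000, 2), (40000, 3)])   # raises
-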
-    def __runWaitAndCheckWorkerPoolForErrorsAndClear(self, cmds, actionVerb, suppressErrorCheck=False):
-        for cmd in cmds:
-            self.__pool.addCommand(cmd)
-
-        self.__pool.wait_and_printdots(len(cmds), self.__quiet)
-        if not suppressErrorCheck:
-            self.__pool.check_results()
-        self.__pool.empty_completed_items()
-
-    def __copyFiles(self, srcDir, destDir, fileNames):
-        for name in fileNames:
-            cmd = gp.LocalCopy("copy file for segment", srcDir + "/" + name, destDir + "/" + name)
-            cmd.run(validateAfter=True)
-
-    def __createEmptyDirectories( self, dir, newDirectoryNames ):
-        for name in newDirectoryNames:
-            subDir = os.path.join(dir, name)
-            unix.MakeDirectory("create blank directory for segment", subDir).run(validateAfter=True)
-            unix.Chmod.local('set permissions on blank dir', subDir, '0700')
-
-    def __buildTarFileForTransfer(self, gpEnv, masterSegment, sampleSegment, newSegments):
-        """
-        Returns a (tempDir, tarFile, tarFileName) tuple for the tarball that
-         should be transferred and used for building the blank segment
-
-        """
-        masterDir = gpEnv.getMasterDataDir()
-
-        # note that this tempdir will be left around on the system (this is what other scripts do currently)
-        tempDir = gp.createTempDirectoryName(gpEnv.getMasterDataDir(), "gpbuildingsegment")
-        unix.MakeDirectory("create temp directory for segment", tempDir ).run(validateAfter=True)
-
-        schemaDir = tempDir + "/schema"
-        unix.MakeDirectory("create temp schema directory for segment", schemaDir ).run(validateAfter=True)
-        unix.Chmod.local('set permissions on schema dir', schemaDir, '0700') # set perms so postgres can start
-
-        #
-        # Copy remote files from the sample segment to the master
-        #
-        for toCopyFromRemote in ["postgresql.conf", "pg_hba.conf"]:
-            cmd = gp.RemoteCopy('copying %s from a segment' % toCopyFromRemote,
-                               sampleSegment.getSegmentDataDirectory() + '/' + toCopyFromRemote,
-                               masterSegment.getSegmentHostName(), schemaDir, ctxt=base.REMOTE,
-                               remoteHost=sampleSegment.getSegmentAddress())
-            cmd.run(validateAfter=True)
-        appendNewEntriesToHbaFile( schemaDir + "/pg_hba.conf", newSegments)
-
-        #
-        # Use the master's version of other files, and build
-        #
-        self.__createEmptyDirectories( schemaDir, gDatabaseDirectories )
-        self.__createEmptyDirectories( schemaDir, gDatabaseSubDirectories )
-        self.__copyFiles(masterDir, schemaDir, ["PG_VERSION", "pg_ident.conf"])
-
-
-        #
-        # Build final tar
-        #
-        tarFileName = "gp_emptySegmentSchema.tar"
-        tarFile = tempDir + "/" + tarFileName
-        cmd = gp.CreateTar('gpbuildingmirrorsegment tar segment template', schemaDir, tarFile)
-        cmd.run(validateAfter=True)
-
-        return (tempDir, tarFile, tarFileName)
-
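-    # Illustrative sketch (not part of this module): the tar-building flow above
-    #  stages files into a temp schema directory, tightens permissions, and tars
-    #  it for transfer. The same shape using only the standard library; the file
-    #  names and contents passed in are hypothetical, not the real template:
-    def _sketch_build_template_tar(files):
-        import os, tarfile, tempfile
-        tempDir = tempfile.mkdtemp(prefix='gpbuildingsegment')
-        schemaDir = os.path.join(tempDir, 'schema')
-        os.mkdir(schemaDir)
-        os.chmod(schemaDir, 0o700)           # postgres refuses laxer permissions
-        for name, content in files.items():
-            with open(os.path.join(schemaDir, name), 'w') as f:
-                f.write(content)
-        tarFile = os.path.join(tempDir, 'gp_emptySegmentSchema.tar')
-        tar = tarfile.open(tarFile, 'w')
-        tar.add(schemaDir, arcname='.')
-        tar.close()
-        return (tempDir, tarFile, 'gp_emptySegmentSchema.tar')
-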
-    def __copySegmentDirectories(self, gpEnv, gpArray, directives):
-        """
-        directives should be composed of GpCopySegmentDirectoryDirective values
-        """
-        if len(directives) == 0:
-            return
-
-        srcSegments = [d.getSrcSegment() for d in directives]
-        destSegments = [d.getDestSegment() for d in directives]
-        isTargetReusedLocation = [d.isTargetReusedLocation() for d in directives]
-        destSegmentByHost = GpArray.getSegmentsByHostName(destSegments)
-        newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(destSegments, isTargetReusedLocation)
-
-        logger.info('Building template directory')
-        # In GPSQL, we need to create a template and copy it to all of the failed segments.
-        if gpArray.getFaultStrategy() == gparray.FAULT_STRATEGY_NONE:
-            tempDir = '/tmp/GPSQL'
-            templateDir = tempDir + '/gpsql_template' + time.strftime("%Y%m%d_%H%M%S")
-            unix.MakeDirectory("create blank directory for segment", templateDir).run(validateAfter=True)
-            unix.Chmod.local('set permissions on template dir', templateDir, '0700') # set perms so postgres can start
-
-            logger.info('Creating template')
-            srcSegments[0].createTemplate(templateDir)
-
-            # We don't need log files or gpperfmon files in the template.
-            rmCmd = unix.RemoveFiles('gprecoverseg remove gpperfmon data from template',
-                                templateDir + '/gpperfmon/data')
-            rmCmd.run(validateAfter=True)
-            rmCmd = unix.RemoveFiles('gprecoverseg remove logs from template',
-                                templateDir + '/pg_log/*')
-            rmCmd.run(validateAfter=True)
-
-            # other files that are not needed
-            rmCmd = unix.RemoveFiles('gprecoverseg remove postmaster.opts from template',
-                                templateDir + '/postmaster.opts')
-            rmCmd.run(validateAfter=True)
-            rmCmd = unix.RemoveFiles('gprecoverseg remove postmaster.pid from template',
-                                templateDir + '/postmaster.pid')
-            rmCmd.run(validateAfter=True)
-
-            # template the temporary directories file
-            template_temporary_directories(templateDir, srcSegments[0].content)
-
-            tarFileName = "gpsqlSegmentTemplate.tar"
-            blankTarFile = tempDir + "/" + tarFileName
-            cmd = gp.CreateTar('gpbuildingmirrorsegment tar segment template', templateDir, blankTarFile)
-            cmd.run(validateAfter=True)
-
-        def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
-            segmentInfo = newSegmentInfo[hostName]
-            checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
-            return gp.ConfigureNewSegment(cmdLabel,
-                                            segmentInfo,
-                                            tarFile=tarFileName,
-                                            newSegments=True,
-                                            verbose=gplog.logging_is_verbose(),
-                                            batchSize=self.__parallelDegree,
-                                            ctxt=gp.REMOTE,
-                                            remoteHost=hostName,
-                                            validationOnly=validationOnly)
-        #
-        # validate directories for target segments
-        #
-        logger.info('Validating remote directories')
-        cmds = []
-        for hostName in destSegmentByHost.keys():
-            cmds.append(createConfigureNewSegmentCommand(hostName, 'validate blank segments', True))
-        for cmd in cmds:
-            self.__pool.addCommand(cmd)
-        self.__pool.wait_and_printdots(len(cmds), self.__quiet)
-        validationErrors = []
-        for item in self.__pool.getCompletedItems():
-            results = item.get_results()
-            if not results.wasSuccessful():
-                if results.rc == 1:
-                    lines = results.stderr.split("\n")
-                    for line in lines:
-                        if len(line.strip()) > 0:
-                            validationErrors.append("Validation failure on host %s %s" % (item.remoteHost, line))
-                else:
-                    validationErrors.append(str(item))
-        self.__pool.empty_completed_items()
-        if validationErrors:
-            raise ExceptionNoStackTraceNeeded("\n" + ("\n".join(validationErrors)))
-
-        #
-        # copy tar from master to target hosts
-        #
-        logger.info('Copying template directory file')
-        cmds = []
-        for hostName in destSegmentByHost.keys():
-            cmds.append( gp.RemoteCopy("copy segment tar", blankTarFile, hostName, tarFileName ))
-
-        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "building and transferring basic segment directory")
-
-        #
-        # unpack and configure new segments
-        #
-        logger.info('Configuring new segments')
-        cmds = []
-        for hostName in destSegmentByHost.keys():
-            cmds.append(createConfigureNewSegmentCommand(hostName, 'configure blank segments', False))
-        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "unpacking basic segment directory")
-
-        #
-        # Clean up copied tar from each remote host
-        #
-        logger.info('Cleaning files')
-        cmds = []
-        for hostName in destSegmentByHost.keys():
-            cmds.append(unix.RemoveFiles('remove tar file', tarFileName, ctxt=gp.REMOTE, remoteHost=hostName))
-        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning up tar file on segment hosts")
-
-        #
-        # clean up the local temp directory
-        #
-        unix.RemoveFiles.local('remove temp directory', tempDir)
-
-    def __ensureStopped(self, gpEnv, directives):
-        """
-
-        @param directives a list of the GpStopSegmentDirectoryDirective values indicating which segments to stop
-
-        """
-        if len(directives) == 0:
-            return
-
-        logger.info("Ensuring %d failed segment(s) are stopped" % (len(directives)))
-        segments = [d.getSegment() for d in directives]
-        segmentByHost = GpArray.getSegmentsByHostName(segments)
-
-        cmds = []
-        for hostName, segments in segmentByHost.iteritems():
-            cmd=gp.GpSegStopCmd("remote segment stop on host '%s'" % hostName,
-                                gpEnv.getGpHome(), gpEnv.getGpVersion(),
-                                mode='fast', dbs=segments, verbose=logging_is_verbose(),
-                                ctxt=base.REMOTE, remoteHost=hostName)
-
-            cmds.append( cmd)
-
-        # we suppress error checking here because gpsegstop will often report an error
-        #  even when the stop actually succeeded (for example, when the segment is
-        #  running but slow to shut down, gpsegstop errors out after whacking it
-        #  with a kill)
-        #
-        # Perhaps we should make it check whether the segment is running and only
-        #  attempt the stop if it is; in that case we could propagate the error
-        #  (see the sketch after this method)
-        #
-        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "stopping segments", suppressErrorCheck=True)
-
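-    # Illustrative sketch (not part of this module) of the improvement floated in
-    #  the comment above: check for a postmaster.pid before stopping, so that a
-    #  stop failure can be treated as a real error. stop_fn is a hypothetical
-    #  stand-in for the GpSegStopCmd invocation:
-    def _sketch_stop_if_running(dataDir, stop_fn):
-        import os
-        if not os.path.exists(os.path.join(dataDir, 'postmaster.pid')):
-            return False              # not running; nothing to stop
-        stop_fn(dataDir)              # a failure here can now be propagated
-        return True
-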
-    def __ensureMarkedDown(self, gpEnv, toEnsureMarkedDown):
-        """Waits for FTS prober to mark segments as down"""
-
-        wait_time = 60 * 30 # Wait up to 30 minutes to handle very large, busy
-                            # clusters that may have faults.  In most cases the
-                            # actual wait will be short; this operation is only
-                            # needed when moving mirrors that are up and need
-                            # to be stopped, which is uncommon.
-
-        dburl = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1')
-
-        time_elapsed = 0
-        seg_up_count = 0
-        initial_seg_up_count = len(toEnsureMarkedDown)
-        last_seg_up_count = initial_seg_up_count
-
-        if initial_seg_up_count == 0:
-            # Nothing to wait on
-            return
-
-        logger.info("Waiting for segments to be marked down.")
-        logger.info("This may take up to %d seconds on large clusters." % wait_time)
-
-        # wait for all needed segments to be marked down by the prober,
-        #  up to a maximum of wait_time seconds
-        while wait_time > time_elapsed:
-            seg_up_count = 0
-            current_gparray = GpArray.initFromCatalog(dburl, True)
-            seg_db_map = current_gparray.getSegDbMap()
-
-            # go through and get the status of each segment we need to be marked down
-            for segdb in toEnsureMarkedDown:
-                if segdb.getSegmentDbId() in seg_db_map and seg_db_map[segdb.getSegmentDbId()].isSegmentUp():
-                    seg_up_count += 1
-            if seg_up_count == 0:
-                break
-            else:
-                if last_seg_up_count != seg_up_count:
-                    print "\n",
-                    logger.info("%d of %d segments have been marked down." %
-                                (initial_seg_up_count - seg_up_count, initial_seg_up_count))
-                    last_seg_up_count = seg_up_count
-
-                for _i in range(5):
-                    time.sleep(1)
-                    sys.stdout.write(".")
-                    sys.stdout.flush()
-
-                time_elapsed += 5
-
-        if seg_up_count == 0:
-            print "\n",
-            logger.info("%d of %d segments have been marked down." %
-                        (initial_seg_up_count, initial_seg_up_count))
-        else:
-            raise Exception("%d segments were not marked down by FTS" % seg_up_count)
-
-
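-    # Illustrative sketch (not part of this module): the wait loop above is a
-    #  poll-with-timeout. A condensed, self-contained version, where check() is
-    #  a hypothetical callable returning the number of segments still marked up
-    #  in the catalog:
-    def _sketch_wait_until_down(check, timeout=1800, interval=5):
-        import time
-        elapsed = 0
-        while True:
-            remaining = check()
-            if remaining == 0:
-                return
-            if elapsed >= timeout:
-                raise Exception("%d segments were not marked down by FTS" % remaining)
-            time.sleep(interval)
-            elapsed += interval
-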
-    def __cleanUpSegmentDirectories(self, directives):
-        if len(directives) == 0:
-            return
-
-        logger.info("Cleaning files from %d segment(s)" % (len(directives)))
-        segments = [d.getSegment() for d in directives]
-        segmentByHost = GpArray.getSegmentsByHostName(segments)
-
-        cmds = []
-        for hostName, segments in segmentByHost.iteritems():
-            cmds.append( gp.GpCleanSegmentDirectories("clean segment directories on %s" % hostName, \
-                    segments, gp.REMOTE, hostName))
-
-        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning existing directories")
-
-    def __createStartSegmentsOp(self, gpEnv):
-        return startSegments.StartSegmentsOperation(self.__pool, self.__quiet,
-                gpEnv.getLocaleData(), gpEnv.getGpVersion(),
-                gpEnv.getGpHome(), gpEnv.getMasterDataDir()
-                )
-
-    def __updateGpIdFile(self, gpEnv, gpArray, segments):
-        segmentByHost = GpArray.getSegmentsByHostName(segments)
-        newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(segments)
-
-        cmds = []
-        for hostName in segmentByHost.keys():
-            segmentInfo = newSegmentInfo[hostName]
-            checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
-            cmd = gp.ConfigureNewSegment("update gpid file",
-                                            segmentInfo,
-                                            newSegments=False,
-                                            verbose=gplog.logging_is_verbose(),
-                                            batchSize=self.__parallelDegree,
-                                            ctxt=gp.REMOTE,
-                                            remoteHost=hostName,
-                                            validationOnly=False,
-                                            writeGpIdFileOnly=True)
-
-            cmds.append(cmd)
-        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "writing updated gpid files")
-
-    def __startAll(self, gpEnv, gpArray, segments):
-
-        # the newly started segments should belong to the current era
-        era = read_era(gpEnv.getMasterDataDir(), logger=gplog.get_logger_if_verbose())
-
-        startMode = startSegments.START_AS_PRIMARY_OR_MIRROR
-        if gpArray.getFaultStrategy() == gparray.FAULT_STRATEGY_NONE:
-            startMode = startSegments.START_AS_MIRRORLESS
-        segmentStartResult = self.__createStartSegmentsOp(gpEnv).startSegments(gpArray, segments, startMode, era)
-
-        for failure in segmentStartResult.getFailedSegmentObjs():
-            failedSeg = failure.getSegment()
-            failureReason = failure.getReason()
-            logger.warn("Failed to start segment.  The fault prober will shortly mark it as down. Segment: %s: REASON: %s" % (failedSeg, failureReason))
-        pass
-        if len(segmentStartResult.getFailedSegmentObjs()) > 0:
-            raise Exception("One or more segments cannot be recovered. Please try to recovery the data directory with -F or move to a spare host with -p.");
-
-    def __convertAllPrimaries(self, gpEnv, gpArray, segments, convertUsingFullResync):
-        segmentStartResult = self.__createStartSegmentsOp(gpEnv).transitionSegments(gpArray, segments, convertUsingFullResync, startSegments.MIRROR_MODE_PRIMARY)
-        for failure in segmentStartResult.getFailedSegmentObjs():
-            failedSeg = failure.getSegment()
-            failureReason = failure.getReason()
-            logger.warn("Failed to inform primary segment of updated mirroring state.  Segment: %s: REASON: %s" % (failedSeg, failureReason))
-
-class GpCleanupSegmentDirectoryDirective:
-    def __init__(self, segment):
-        checkNotNone("segment", segment)
-        self.__segment = segment
-
-    def getSegment(self):
-        return self.__segment
-
-class GpStopSegmentDirectoryDirective:
-    def __init__(self, segment):
-        checkNotNone("segment", segment)
-        self.__segment = segment
-
-    def getSegment(self):
-        return self.__segment
-
-class GpCopySegmentDirectoryDirective:
-
-    def __init__(self, source, dest, isTargetReusedLocation ):
-        """
-        @param isTargetReusedLocation if True then the dest location is a cleaned-up location
-        """
-        checkNotNone("source", source)
-        checkNotNone("dest", dest)
-
-        self.__source = source
-        self.__dest = dest
-        self.__isTargetReusedLocation = isTargetReusedLocation
-
-    def getSrcSegment(self):
-        return self.__source
-
-    def getDestSegment(self):
-        return self.__dest
-
-    def isTargetReusedLocation(self):
-        return self.__isTargetReusedLocation
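-
-# Illustrative sketch (not part of this module): the three directive classes
-#  above are plain value objects consumed by buildMirrors(). A hypothetical
-#  construction, with object() placeholders standing in for real gparray
-#  segments:
-def _sketch_directives():
-    failed, live, dest = object(), object(), object()
-    stop = GpStopSegmentDirectoryDirective(failed)
-    clean = GpCleanupSegmentDirectoryDirective(failed)
-    copy = GpCopySegmentDirectoryDirective(live, dest, isTargetReusedLocation=False)
-    assert copy.getSrcSegment() is live and copy.getDestSegment() is dest
-    assert not copy.isTargetReusedLocation()
-    return (stop, clean, copy)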