You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hawq.apache.org by rl...@apache.org on 2015/10/12 11:19:50 UTC
[1/4] incubator-hawq git commit: HAWQ-39. Remove below unused mgmt
scripts for hawq2.0:
Repository: incubator-hawq
Updated Branches:
refs/heads/master b07297aa4 -> 8ec87e6a5
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/doc/gpinitsystem_help
----------------------------------------------------------------------
diff --git a/tools/doc/gpinitsystem_help b/tools/doc/gpinitsystem_help
deleted file mode 100755
index 046df0a..0000000
--- a/tools/doc/gpinitsystem_help
+++ /dev/null
@@ -1,362 +0,0 @@
-COMMAND NAME: gpinitsystem
-
-Initializes a HAWQ system by using configuration
-parameters specified in a configuration file (gp_init_config).
-
-
-*****************************************************
-SYNOPSIS
-*****************************************************
-
-gpinitsystem -c <gpinitsystem_config>
- [-h <hostfile_gpinitsystem>]
- [-B <parallel_processes>]
- [-p <postgresql_conf_param_file>]
- [-s <standby_master_host>]
- [--max_connections=<number>] [--shared_buffers=<size>]
- [--locale=<locale>] [--lc-collate=<locale>]
- [--lc-ctype=<locale>] [--lc-messages=<locale>]
- [--lc-monetary=<locale>] [--lc-numeric=<locale>]
- [--lc-time=<locale>] [--su_password=<password>]
- [-a] [-q] [-l <logfile_directory>] [-D]
-
-gpinitsystem -?
-
-gpinitsystem -v
-
-
-*****************************************************
-DESCRIPTION
-*****************************************************
-
-The gpinitsystem utility will create a HAWQ instance
-using the values defined in a configuration file. See the
-INITIALIZATION CONFIGURATION FILE FORMAT section below.
-
-Before running this utility, make sure that you have installed
-the HAWQ software on all the hosts in the array.
-
-In a HAWQ DBMS, each database instance (the master
-and all segments) must be initialized across all of the hosts in
-the system in such a way that they can all work together as a
-unified DBMS. The gpinitsystem utility takes care of initializing
-the HAWQ master and each segment instance, and configuring
-the system as a whole.
-
-Before running gpinitsystem, you must set the $GPHOME environment
-variable to point to the location of your Greenplum Database
-installation on the master host and exchange SSH keys between
-all host addresses in the array using gpssh-exkeys.
-
-This utility performs the following tasks:
-
-* Verifies that the parameters in the configuration file are correct.
-* Ensures that a connection can be established to each host address.
- If a host address cannot be reached, the utility will exit.
-* Verifies the locale settings.
-* Displays the configuration that will be used and prompts the
- user for confirmation.
-* Initializes the master instance.
-* Initializes the standby master instance (if specified).
-* Initializes the primary segment instances.
-* Configures the HAWQ system and checks for errors.
-* Starts the HAWQ system.
-
-
-*****************************************************
-OPTIONS
-*****************************************************
-
--a (do not prompt)
-
- Do not prompt the user for confirmation.
-
-
--B <parallel_processes>
-
- The number of segments to create in parallel. If not specified,
- the utility will start up to 4 parallel processes at a time.
-
-
--c <gpinitsystem_config>
-
- Required. The full path and filename of the configuration file, which
- contains all of the defined parameters to configure and initialize a
- new HAWQ system. See INITIALIZATION CONFIGURATION FILE FORMAT below.
-
--D (debug)
-
- Sets log output level to debug.
-
-
--h <hostfile_gpinitsystem>
-
- Optional. The full path and file name of a file that contains the host
- addresses of your segment hosts. If not specified on the command line,
- you can specify the host file using the MACHINE_LIST_FILE parameter
- in the gpinitsystem_config file.
-
-
---locale=<locale> | -n <locale>
-
- Sets the default locale used by HAWQ. If not specified,
- the LC_ALL, LC_COLLATE, or LANG environment variable of the master
- host determines the locale. If these are not set, the default locale
- is C (POSIX). A locale identifier consists of a language identifier
- and a region identifier, and optionally a character set encoding.
- For example, sv_SE is Swedish as spoken in Sweden, en_US is U.S.
- English, and fr_CA is French Canadian. If more than one character
- set can be useful for a locale, then the specifications look like
- this: en_US.UTF-8 (locale specification and character set encoding).
- On most systems, the command locale will show the locale environment
- settings and locale -a will show a list of all available locales.
-
-
---lc-collate=<locale>
-
- Similar to --locale, but sets the locale used for collation (sorting data).
- The sort order cannot be changed after Greenplum Database is initialized,
- so it is important to choose a collation locale that is compatible with
- the character set encodings that you plan to use for your data. There is a
- special collation name of C or POSIX (byte-order sorting as opposed to
- dictionary-order sorting). The C collation can be used with any
- character encoding.
-
-
---lc-ctype=<locale>
-
- Similar to --locale, but sets the locale used for character classification
- (what character sequences are valid and how they are interpreted). This
- cannot be changed after Greenplum Database is initialized, so it is
- important to choose a character classification locale that is compatible
- with the data you plan to store in HAWQ.
-
-
---lc-messages=<locale>
-
- Similar to --locale, but sets the locale used for messages output by
- HAWQ. The current version of HAWQ does not
- support multiple locales for output messages (all messages are in English),
- so changing this setting will not have any effect.
-
-
---lc-monetary=<locale>
-
- Similar to --locale, but sets the locale used for formatting currency amounts.
-
-
---lc-numeric=<locale>
-
- Similar to --locale, but sets the locale used for formatting numbers.
-
-
---lc-time=<locale>
-
- Similar to --locale, but sets the locale used for formatting dates and times.
-
-
--l <logfile_directory>
-
- The directory to write the log file. Defaults to ~/gpAdminLogs.
-
-
---max_connections=<number> | -m <number>
-
- Sets the maximum number of client connections allowed to the master.
- The default is 25.
-
-
--p <postgresql_conf_param_file>
-
- Optional. The name of a file that contains postgresql.conf parameter
- settings that you want to set for HAWQ. These settings
- will be used when the individual master and segment instances are
- initialized. You can also set parameters after initialization using
- the gpconfig utility.
-
-
--q (no screen output)
-
- Run in quiet mode. Command output is not displayed on the screen,
- but is still written to the log file.
-
-
---shared_buffers=<size> | -b <size>
-
- Sets the amount of memory a HAWQ server instance uses for shared
- memory buffers. You can specify sizing in kilobytes (kB), megabytes (MB)
- or gigabytes (GB). The default is 125MB.
-
-
--s <standby_master_host>
-
- Optional. If you wish to configure a backup master host, specify the
- host name using this option. The HAWQ software must
- already be installed and configured on this host.
-
-
---su_password=<superuser_password> | -e <superuser_password>
-
- The password to set for the HAWQ
- superuser. Defaults
- to 'gparray'. You can always change the superuser password at a
- later time using the ALTER ROLE command. Client connections over
- the network require a password login for the database superuser
- account (for example, the gpadmin user).
-
- Best practices: Always use passwords, do not use default passwords,
- change default passwords immediately after installation.
-
-
--v (show utility version)
-
- Displays the version of this utility.
-
-
--? (help)
-
- Displays the online help.
-
-
-*****************************************************
-INITIALIZATION CONFIGURATION FILE FORMAT
-*****************************************************
-
-gpinitsystem requires a configuration file with the following
-parameters defined. An example initialization configuration
-file can be found in
-$GPHOME/docs/cli_help/gpconfigs/gpinitsystem_config.
-
-ARRAY_NAME
-
- Required. A name for the array you are configuring. You can use
- any name you like. Enclose the name in quotes if the name
- contains spaces.
-
-
-MACHINE_LIST_FILE
-
- Optional. Can be used in place of the -h option. This specifies
- the file that contains the list of segment host address names that
- comprise the Greenplum system. The master host is assumed to be
- the host from which you are running the utility and should not be
- included in this file. If your segment hosts have multiple network
- interfaces, then this file would include all addresses for the host.
- Give the absolute path to the file.
-
-
-SEG_PREFIX
-
- Required. This specifies a prefix that will be used to name the
- data directories on the master and segment instances. The naming
- convention for data directories in a HAWQ system is
- SEG_PREFIX<number> where <number> starts with 0 for segment
- instances (the master is always -1). So for example, if you choose
- the prefix gpseg, your master instance data directory would be
- named gpseg-1, and the segment instances would be named
- gpseg0, gpseg1, gpseg2, gpseg3, and so on.
-
-
-PORT_BASE
-
- Required. This specifies the base number by which primary segment
- port numbers are calculated. The first primary segment port on a
- host is set as PORT_BASE, and then incremented by one for each
- additional primary segment on that host. Valid values range from
- 1 through 65535.
-
-
-DATA_DIRECTORY
-
- Required. This specifies the data storage location(s) where the
- utility will create the primary segment data directories. The
- number of locations in the list dictate the number of primary
- segments that will get created per physical host (if multiple
- addresses for a host are listed in the host file, the number of
- segments will be spread evenly across the specified interface
- addresses). It is OK to list the same data storage area multiple
- times if you want your data directories created in the same location.
- The user who runs gpinitsystem (for example, the gpadmin user) must
- have permission to write to these directories. For example, this
- will create six primary segments per host:
-
- declare -a DATA_DIRECTORY=(/data1/primary /data1/primary
- /data1/primary /data2/primary /data2/primary /data2/primary)
-
-
-MASTER_HOSTNAME
-
- Required. The host name of the master instance. This host name must
- exactly match the configured host name of the machine (run the hostname
- command to determine the correct hostname).
-
-
-MASTER_DIRECTORY
-
- Required. This specifies the location where the data directory will
- be created on the master host. You must make sure that the user who runs
- gpinitsystem (for example, the gpadmin user) has permissions to write
- to this directory.
-
-
-MASTER_PORT
-
- Required. The port number for the master instance. This is the port
- number that users and client connections will use when accessing the
- HAWQ system.
-
-DFS_NAME
-
- Required. The distributed file system name for the HAWQ storing files.
- Currently, HAWQ only supports HDFS
-
-DFS_URL
-
- Required. The hostname, port, and relative path of distributed
- file system. Currently, HAWQ only supports HDFS.
-
-
-TRUSTED_SHELL
-
- Required. The shell the gpinitsystem utility uses to execute commands
- on remote hosts. Allowed values are ssh. You must set up your trusted
- host environment before running the gpinitsystem utility (you can
- use gpssh-exkeys to do this).
-
-
-CHECK_POINT_SEGMENTS
-
- Required. Maximum distance between automatic write ahead log (WAL)
- checkpoints, in log file segments (each segment is normally 16 megabytes).
- This will set the checkpoint_segments parameter in the postgresql.conf
- file for each segment instance in the HAWQ system.
-
-
-ENCODING
-
- Required. The character set encoding to use. This character set must
- be compatible with the --locale settings used, especially --lc-collate
- and --lc-ctype. HAWQ supports the same character sets
- as PostgreSQL.
-
-
-DATABASE_NAME
-
- Optional. The name of a HAWQ database to create after
- the system is initialized. You can always create a database later
- using the CREATE DATABASE command or the createdb utility.
-
-
-*****************************************************
-EXAMPLES
-*****************************************************
-
-Initialize a HAWQ array and set the superuser remote password:
-
- $ gpinitsystem -c gpinitsystem_config -h hostfile_gpinitsystem
- --su_password=mypassword
-
-
-Initialize a HAWQ array with an optional standby master host:
-
- $ gpinitsystem -c gpinitsystem_config -h hostfile_gpinitsystem -s host09S
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/doc/gpseginstall_help
----------------------------------------------------------------------
diff --git a/tools/doc/gpseginstall_help b/tools/doc/gpseginstall_help
deleted file mode 100644
index 3602a80..0000000
--- a/tools/doc/gpseginstall_help
+++ /dev/null
@@ -1,157 +0,0 @@
-COMMAND NAME: gpseginstall
-
-Installs Greenplum Database on segment hosts.
-
-
-*****************************************************
-SYNOPSIS
-*****************************************************
-
-gpseginstall -f host_file [-u user] [-p password]
- [-c [u|p|c|s|E|e|l|v]]
-
-gpseginstall --help
-
-
-*****************************************************
-DESCRIPTION
-*****************************************************
-
-The gpseginstall utility provides a simple way to quickly install Greenplum
-Database on segment hosts that you specify in a host list file. The utility
-does not install or update Greenplum Database on the master host. You can
-run gpseginstall as root or as a non-root user. gpseginstall does not perform
-database initialization. See gpinitsystem for more information about
-initializing Greenplum Database.
-
-When run as root, gpseginstall default actions are to add a system user
-(default is gpadmin), create a password (default is changeme), and deploy and
-install Greenplum Database on segment hosts. To do this, gpseginstall locates
-the current Greenplum Database binaries on the master from the installation
-path in the current user's environment variables ($GPHOME). It compresses
-Greenplum Database software into a tar.gz file and performs an MD5 checksum
-to verify file integrity.
-
-Then, it copies Greenplum Database to the segment hosts, installs
-(decompresses) Greenplum Database, and changes the ownership of the Greenplum
-Database installation to the system user you specify with the -u option.
-Lastly, it exchanges keys between all Greenplum Database hosts as both root
-and as the system user you specify with the -u option. gpseginstall also performs
-a user limit check and verifies the version number of Greenplum Database on all
-the segments.
-
-If you run gpseginstall as a non-root user, gpseginstall only compresses, copies,
-and installs Greenplum Database on segment hosts. It can also exchange keys
-between Greenplum Database hosts for the current system user, and verifies the
-version number of Greenplum Database on all the segments.
-
-
-*****************************************************
-OPTIONS
-*****************************************************
-
--c | --commands command_option(s)
-
-This allows you to customize gpseginstall actions. Note that these command
-options are executed by default if you do not specify the -c option in the
-gpseginstall syntax.
-
- * u: Adds a system user. (root only)
-
- * p: Changes the password for a system user. (root only)
-
- * s: Compresses, copies, decompresses (installs) Greenplum Database on all
- segments.
-
- * c: Changes the ownership of the Greenplum Database installation directory on
- the segment hosts. (root only)
-
- * E: Exchange keys between Greenplum Database master and segment hosts for the
- root user. (root only)
-
- * e: Exchange keys between Greenplum Database master and segment hosts for the
- non-root system user.
-
- * l: (Linux only) Checks and modifies the user limits configuration file
- (/etc/security/limits.conf file) when adding a new user to segment hosts.
- (root only)
-
- * v: Verifies the version of Greenplum Database running on all segments.
- gpseginstall checks the version number of the Greenplum Database
- installation referenced by the $GPHOME environment variable and symbolic
- link to the installation directory. An error occurs if there is a version
- number mismatch or the Greenplum Database installation directory cannot be
- found.
-
-
--f | --file host_file
-
-This option is required. This specifies the file that lists the segment hosts
-onto which you want to install Greenplum Database.
-
-The host list file must have one host name per line and includes a host name
-for each segment host in your Greenplum system. Make sure there are no blank
-lines or extra spaces. If a host has multiple configured host names, use only
-one host name per host. For example:
-
-sdw1-1
-sdw2-1
-sdw3-1
-sdw4-1
-
-If available, you can use the same gpssh-exkeys host list file you used to
-exchange keys between Greenplum Database hosts.
-
-
--p | --password password
-
-This sets the password for the user you specify with the -u option. The
-default password is changeme. This option is only available when you run
-gpseginstall as root.
-
-Best practices: Always use passwords, do not use default passwords,
-change default passwords immediately after installation.
-
-
--u | --user user
-
-This specifies the system user. This user is also the Greenplum Database
-administrative user. This user owns Greenplum Database installation and
-administers the database. This is also the user under which Greenplum
-Database is started/initialized. This option is only available when you run
-gpseginstall as root. The default is gpadmin.
-
-
---help (help)
-
-Displays the online help.
-
-
-*****************************************************
-EXAMPLES
-*****************************************************
-
-As root, install a Greenplum Database on all segments, leave the system user
-as the default (gpadmin) and set the gpadmin password to secret123:
-
-$ gpseginstall -f my_host_list_file -p secret123
-
-As a non-root user, compress and copy Greenplum Database binaries to all
-segments (as gpadmin)
-
-$ gpseginstall -f host_file
-
-As root, add a user (gpadmin2), set the password for the user (secret1234),
-exchange keys between hosts as the new user, check user limits, and verify
-version numbers, but do not change ownership of Greenplum binaries,
-compress/copy/install Greenplum Database on segments, or exchange keys as
-root.
-
-$ gpseginstall -f host_file -u gpadmin2 -p secret1234 -c upelv
-
-
-*****************************************************
-SEE ALSO
-*****************************************************
-
-gpinitsystem
\ No newline at end of file
[4/4] incubator-hawq git commit: HAWQ-39. Remove below unused mgmt
scripts for hawq2.0:
Posted by rl...@apache.org.
HAWQ-39. Remove below unused mgmt scripts for hawq2.0:
gpactivatestandby gpinitstandby gpinitsystem gpseginstall lib/gpcreateseg.sh lib/gpinitsegment lib/gpseginitsb.sh
Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/8ec87e6a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/8ec87e6a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/8ec87e6a
Branch: refs/heads/master
Commit: 8ec87e6a5be1981f0915a015b0805d770d9dd32b
Parents: b07297a
Author: rlei <rl...@pivotal.io>
Authored: Mon Oct 12 12:36:12 2015 +0800
Committer: rlei <rl...@pivotal.io>
Committed: Mon Oct 12 12:36:58 2015 +0800
----------------------------------------------------------------------
tools/Makefile | 3 -
tools/bin/gpactivatestandby | 652 ---------
tools/bin/gpinitstandby | 1030 --------------
tools/bin/gpinitsystem | 2403 ---------------------------------
tools/bin/gpseginstall | 973 -------------
tools/bin/lib/gpcreateseg.sh | 331 -----
tools/bin/lib/gpinitsegment | 542 --------
tools/bin/lib/gpseginitsb.sh | 113 --
tools/doc/gpactivatestandby_help | 160 ---
tools/doc/gpinitsystem_help | 362 -----
tools/doc/gpseginstall_help | 157 ---
11 files changed, 6726 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/Makefile
----------------------------------------------------------------------
diff --git a/tools/Makefile b/tools/Makefile
index 8d87e79..277da1f 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -22,7 +22,6 @@ clean distclean:
#---------------------------------------------------------------------
SET_VERSION_SCRIPTS = \
- bin/gpactivatestandby \
bin/gpaddmirrors \
bin/gpbitmapreindex \
bin/gpcheck \
@@ -33,8 +32,6 @@ SET_VERSION_SCRIPTS = \
bin/gpexpand \
bin/gpextract \
bin/gpfilespace \
- bin/gpinitstandby \
- bin/gpinitsystem \
bin/gpload.py \
bin/gplogfilter \
bin/gpmigrator \
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/gpactivatestandby
----------------------------------------------------------------------
diff --git a/tools/bin/gpactivatestandby b/tools/bin/gpactivatestandby
deleted file mode 100755
index db323e2..0000000
--- a/tools/bin/gpactivatestandby
+++ /dev/null
@@ -1,652 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Line too long - pylint: disable=C0301
-# Invalid name - pylint: disable=C0103
-#
-# Copyright (c) EMC 2011
-# Copyright (c) Greenplum Inc 2010
-# All Rights Reserved.
-#
-"""
-Activates a standby master instance when the primary master instance has
-failed. Will stop the gpsyncmaster process, update the system catalog
-tables and start the instance with the standby master instance as the
-new master.
-"""
-
-import os
-import sys
-import signal
-import glob
-import time
-import shutil
-from datetime import datetime, timedelta
-
-# import GPDB modules
-try:
- from gppylib.commands import unix, gp, pg
- from gppylib.commands.gp import GpAddConfigScript
- from gppylib.db import dbconn
- from gppylib.gpparseopts import OptParser, OptChecker, OptionGroup, SUPPRESS_HELP
- from gppylib.gplog import get_default_logger, setup_tool_logging, enable_verbose_logging, get_logger_if_verbose
- from gppylib import gparray
- from gppylib.userinput import ask_yesno
- from gppylib.gp_dbid import writeGpDbidFile
- from gppylib.operations.filespace import PG_SYSTEM_FILESPACE, GP_TRANSACTION_FILES_FILESPACE, GP_TEMPORARY_FILES_FILESPACE, GetFilespaceEntries, GetFilespaceEntriesDict
- from gppylib.commands.base import WorkerPool
-except ImportError, e_:
- sys.exit('ERROR: Cannot import modules. Please check that you '
- 'have sourced greenplum_path.sh. Detail: ' + str(e_))
-
-EXECNAME = os.path.split(__file__)[-1]
-
-# Threshold values
-LOG_TIME_THRESHOLD_MINS = 120
-
-
-_description = sys.modules[__name__].__doc__
-_usage = "\n"
-
-
-class GpActivateStandbyException(Exception):
- "Generic exception for all things activatestandby"
- pass
-
-
-#-------------------------------------------------------------------------
-def parseargs():
- """Parses and validates command line args."""
-
- parser = OptParser(option_class=OptChecker,
- description=' '.join(_description.split()),
- version='%prog version $Revision$')
- parser.setHelp([])
- parser.remove_option('-h')
-
- # General options section
- optgrp = OptionGroup(parser, 'General options')
- optgrp.add_option('-h', '-?', '--help', dest='help', action='store_true',
- help='display this help message and exit')
- optgrp.add_option('-v', dest='version', action='store_true',
- help='display version information and exit')
- parser.add_option_group(optgrp)
-
- # Logging options section
- optgrp = OptionGroup(parser, 'Logging options')
- optgrp.add_option('-q', '--quiet', action='store_true',
- help='quiet mode, do not log progress to screen')
- optgrp.add_option('-l', '--logfile', type='string', default=None,
- help='alternative logfile directory')
- optgrp.add_option('-a', help='don\'t ask to confirm standby master activation',
- dest='confirm', default=True, action='store_false')
- parser.add_option_group(optgrp)
-
- # Standby activation options section
- optgrp = OptionGroup(parser, 'Standby activation options')
- optgrp.add_option('-d', '--master-data-directory', dest='master_data_dir',
- type='string', help='standby master data directory')
- optgrp.add_option('-f', '--force', action='store_true',
- help='force activation if gpsyncmaster process not running')
- optgrp.add_option('-c', '--create-new-standby', type='string', dest='new_standby',
- help='create a new standby master instance following successful '
- 'activation of the standby master instance', metavar='HOSTNAME')
- optgrp.add_option('--ignore', dest='ignore', type='string', help=SUPPRESS_HELP)
- parser.add_option_group(optgrp)
-
- parser.set_defaults(quiet=False, force=False)
-
- # Parse the command line arguments
- (options, args) = parser.parse_args()
-
- if options.help:
- parser.print_help()
- parser.exit(0, None)
-
- if options.version:
- parser.print_version()
- parser.exit(0, None)
-
- # check we got the -d option
- if not options.master_data_dir:
- logger.fatal('Required option -d is missing.')
- parser.exit(2, None)
-
- # We have to normalize this path for a later comparison
- options.master_data_dir = os.path.abspath(os.path.normpath(options.master_data_dir))
-
- # check that there isn't a conflict between -d option and MASTER_DATA_DIRECTORY env
- env_master_data_dir = os.getenv('MASTER_DATA_DIRECTORY', None)
- if not env_master_data_dir:
- logger.fatal('MASTER_DATA_DIRECTORY environment variable not set.')
- parser.exit(2, None)
-
- if os.path.normpath(env_master_data_dir) != options.master_data_dir:
- logger.fatal('Current setting of MASTER_DATA_DIRECTORY not same as -d parameter.')
- parser.exit(2, None)
-
- # check we aren't trying to create a standby master on this host
- if options.new_standby and unix.getLocalHostname() == options.new_standby:
- logger.fatal('New standby hostname supplied is same as current hostname.')
- parser.exit(2, None)
-
- # check new standby host is up
- if options.new_standby:
- cmd = unix.Ping('Check new standby host up', options.new_standby)
- cmd.run()
- if cmd.get_results().rc != 0:
- logger.fatal('New standby host %s did not respond to ping request.' % options.new_standby)
- parser.exit(2, None)
-
- # Check master data directory does not exist already on new standby
- if unix.FileDirExists.remote('Check new standby', options.new_standby, options.master_data_dir):
- logger.fatal('%s already exists on proposed new standby master %s' % \
- (options.master_data_dir, options.new_standby))
- parser.exit(2, None)
-
- if options.logfile and not os.path.exists(options.logfile):
- logger.fatal('Log directory %s does not exist.' % options.logfile)
- parser.exit(2, None)
-
- # The default logging for gpactivatestandby is verbose
- if not options.quiet:
- enable_verbose_logging()
-
- # There shouldn't be any args
- if len(args) > 0:
- logger.error('Unknown arguments:')
- for arg in args:
- logger.error(' %s' % arg)
- parser.exit(2, None)
-
- return options, args
-
-
-#-------------------------------------------------------------------------
-def print_results(array, hostname, options):
- """Prints out the summary of the operation."""
-
- logger.info('-----------------------------------------------------')
- logger.info('The activation of the standby master has completed successfully.')
- logger.info('%s is now the new primary master.' % hostname)
- logger.info('You will need to update your user access mechanism to reflect')
- logger.info('the change of master hostname.')
- logger.info('Do not re-start the failed master while the fail-over master is')
- logger.info('operational, this could result in database corruption!')
- logger.info('MASTER_DATA_DIRECTORY is now %s if' % options.master_data_dir)
- logger.info('this has changed as a result of the standby master activation, remember')
- logger.info('to change this in any startup scripts etc, that may be configured')
- logger.info('to set this value.')
- if not options.new_standby:
- logger.info('New standby master not initialized!')
- else:
- logger.info('New standby master instance %s' % options.new_standby)
- logger.info('MASTER_PORT is now %d, if this has changed, you' % array.standbyMaster.getSegmentPort())
- logger.info('may need to make additional configuration changes to allow access')
- logger.info('to the Greenplum instance.')
- logger.info('Refer to the Administrator Guide for instructions on how to re-activate')
- logger.info('the master to its previous state once it becomes available.')
- logger.info('Query planner statistics must be updated on all databases')
- logger.info('following standby master activation.')
- logger.info('When convenient, run ANALYZE against all user databases.')
- logger.info('-----------------------------------------------------')
-
-
-#-------------------------------------------------------------------------
-def print_summary(options, warnings_errors, last_entry_time):
- # Too many statements - pylint: disable=R0915
-
- """Print summary of the action and asks user if they want to
- continue with the activation."""
-
- last_entry_ago = None
- require_force = False
- warnings_generated = False
-
- # calculate the timedelta of last log message
- if last_entry_time:
- last_entry_ago = datetime.now() - datetime.strptime(last_entry_time, '%Y-%m-%d %H:%M:%S.%f %Z')
-
- # check the syncmaster
- gpsyncmaster_running = check_gpsync_running(options)
- if not gpsyncmaster_running:
- require_force = True
-
- logger.info('-----------------------------------------------------')
- logger.info('Master data directory = %s' % options.master_data_dir)
- if options.logfile:
- logger.info('Log directory = %s' % options.logfile)
- logger.info('gpsyncmaster running = %s' % ('yes' if gpsyncmaster_running else 'no'))
- logger.info('Last log entry time = %s' % last_entry_time)
- if last_entry_ago:
- logger.info(' %s ago' % last_entry_ago)
- logger.info('Create new standby master = %s' % ('yes' if options.new_standby else 'no'))
- if options.new_standby:
- logger.info('New standby master host = %s' % options.new_standby)
- logger.info('Force standby activation = %s' % ('yes' if options.force else 'no'))
- logger.info('-----------------------------------------------------')
- if last_entry_ago > timedelta(minutes=LOG_TIME_THRESHOLD_MINS):
- logger.warning('The last log entry timestamp was over %d minutes ago.' % LOG_TIME_THRESHOLD_MINS)
- logger.warning('This indicates that the standby master is likely out of date.')
- require_force = True
- warnings_generated = True
- if len(warnings_errors) > 0:
- logger.warning('The following warnings/errors were found in the most recent log file:')
- for log_msg in warnings_errors:
- logger.warning(' %s' % log_msg)
-
- logger.warning('Greenplum has detected errors and/or warnings in your standby')
- logger.warning('master log file that indicate a problem with the synchronization process')
- logger.warning('between your primary and standby master hosts. Before activating your')
- logger.warning('standby master, it is critical to ensure that it is up to date with all')
- logger.warning('of the transactions currently committed to Greenplum Database. If you')
- logger.warning('activate a standby master that is not in sync with the transactional')
- logger.warning('state of the segments, you may introduce catalog and data')
- logger.warning('inconsistencies that will render your Greenplum Database instance')
- logger.warning('unusable. If your primary master is no longer available and you suspect')
- logger.warning('that you do not have an up-to-date standby master, contact Greenplum')
- logger.warning('Customer Support for further assistance.')
- logger.warning('It is also recommended that you make a backup of the standby master')
- logger.warning('data directory (%s) before continuing.' % options.master_data_dir)
-
- require_force = True
- warnings_generated = True
- # Check if we require a force
- if require_force and not options.force:
- logger.warning('If you wish to continue you must use the -f option to force')
- logger.warning('the activation process.')
- warnings_generated = True
- raise GpActivateStandbyException('Force activation required')
- if options.confirm:
- yn = ask_yesno(None, 'Do you want to continue with standby master activation?', 'N')
- if not yn:
- raise GpActivateStandbyException('User canceled')
-
- return warnings_generated
-
-
-#-------------------------------------------------------------------------
-def get_most_recent_log(options):
- """
- Returns the file name of the most recent log file.
- """
- file_pattern = options.master_data_dir + '/pg_log/gpdb*.csv'
- file_pair = lambda f: (time.localtime(os.stat(f)[8]), f)
- file_list = sorted([file_pair(f) for f in glob.glob(file_pattern)])
- if len(file_list) == 0:
- return None
- return file_list[-1][1]
-
-
-#-------------------------------------------------------------------------
-def setup_ignore_filter(options):
- """
- Returns ignore_filter function found in python file specified by options.ignore.
- """
- if not options.ignore:
- return None
-
- # load the filter from the file, giving it access to the logger
- #
- gdict = { 'logger':logger }
- try:
- execfile(options.ignore, gdict)
- except (OSError, IOError), e:
- logger.info('Could not read ignore_filter file: %s' % str(e))
-
- return gdict.get('ignore_filter')
-
-
-#-------------------------------------------------------------------------
-def check_recent_log(options):
- """
- Checks the most recent GPDB log file for warnings and errors related to standby master.
- Returns a tuple containing the warnings and the time of the last log entry.
- """
- warnings_errors = []
- recent_log = get_most_recent_log(options)
- ignore_filter = setup_ignore_filter(options)
- ignore_count = 0
- how = "Filtering" if ignore_filter is not None else "Examining"
-
- logger.info('%s log file %s for warnings and errors...' % (how, recent_log))
-
- lines = gp.GpLogFilter.local('log search', recent_log, trouble=True)
- for line in lines:
-
- # filter out lines matched by the ignore_filter
- if ignore_filter is not None and ignore_filter(line):
- ignore_count += 1
- continue
-
- # collect errors
- try:
- warnings_errors.append(line.split('|')[18])
- except Exception, e:
- # badly formatted log entry or empty line??
- logger.info(str(e))
- logger.info(line)
-
- # report lines ignored
- if ignore_filter is not None and ignore_count > 0:
- logger.info("Note: %d line(s) ignored by %s's ignore_filter" % (ignore_count, options.ignore))
-
- # get date from last line in log file
- last_entry_time = None
- last_line = gp.GpLogFilter.local('last log msg', recent_log, count=1)
- if last_line:
- last_entry_time = last_line[0].split('|')[0]
-
- return (warnings_errors, last_entry_time)
-
-
-#-------------------------------------------------------------------------
-def check_standby_master_activation(options):
- """Runs general sanity checks on the standby master looking for any
- obvious situations that would cause problems."""
-
- # Parse the most recent log file looking for sync errors and warnings.
- (warnings_errors, last_entry_time) = check_recent_log(options)
- return (warnings_errors, last_entry_time)
-
-
-#-------------------------------------------------------------------------
-def get_config():
- """Retrieves configuration information from the catalog."""
-
- logger.info('Reading current configuration...')
- dburl = dbconn.DbURL()
- array = gparray.GpArray.initFromCatalog(dburl, utility=True)
-
- master_hostname = array.master.getSegmentHostName()
- master_port = array.master.getSegmentPort()
-
- cmd = pg.ReadPostmasterTempFile.remote('Read postmaster file', master_port, master_hostname)
- (file_exists, _, _) = cmd.getResults()
- if file_exists:
- logger.warn('Appears that there is an active postgres process on %s port=%d' % (master_hostname, master_port))
- logger.info('This may have been caused by a kill -9 of the master postgres process.')
- logger.info('Traces of this process will need to be removed, please follow the instructions below.')
- logger.info('1. Delete the /tmp/.s.PGSQL.%d and /tmp/.s.PGSQL.%d.* files on %s' % (master_port, master_port, master_hostname))
- logger.info('2. Remove the %s/postmaster.pid file on %s' % (array.master.getSegmentDataDirectory(), master_hostname))
- logger.info('3. Then call this utility again.')
- stop_master()
- raise GpActivateStandbyException('Active postgres process on master')
-
- return array
-
-#-------------------------------------------------------------------------
-def update_flat_file(array, flat_file):
- """
- If the transaction/temporary filespaces have
- ever been moved, we need to update the flat file.
- The filespace directories are copied by the
- copy_master_filespaces method.
- """
-
- logger.info('Updating filespace flat files')
-
- pg_system_fs_entries = GetFilespaceEntriesDict(GetFilespaceEntries(array, PG_SYSTEM_FILESPACE).run()).run()
-
- flat_file_location = os.path.join(pg_system_fs_entries[1][2], flat_file)
-
- if not os.path.exists(flat_file_location):
- return
-
- logger.debug('flat file location for transaction files = %s' % flat_file_location)
- #Copy over the updated flat file to the standby
- with open(flat_file_location) as read_file:
- lines_to_write = ''
- for line in read_file:
- tokens = line.split()
- if len(tokens) != 2:
- lines_to_write += line
- elif tokens[0] == '1':
- lines_to_write += line
-
- temp_flat_file = os.path.join(flat_file_location + '.tmp')
-
- try:
- with open(temp_flat_file, 'w') as write_file:
- write_file.write(lines_to_write)
-
- #Rewrite the master flat file to include the standby information
- shutil.move(temp_flat_file, flat_file_location)
- except Exception, e:
- raise Exception('Failed to update flat file')
-
-def set_repair_global_sequence(datadir):
- """set gp_persistent_repair_global_sequenece for new standby master"""
-
- pool = WorkerPool()
- cmd = GpAddConfigScript(unix.getLocalHostname(), datadir, 'gp_persistent_repair_global_sequence', 'true', False)
- pool.addCommand(cmd)
- try:
- pool.join()
- items = pool.getCompletedItems()
- failure = False
- for i in items:
- if not i.was_successful():
- logger.error('failed updating the postgresql.conf files on host: ' + i.remoteHost)
- failure = True
-
- pool.check_results()
- except Exception, e:
- logger.error('errors in job:')
- logger.error(e.__str__())
- logger.error('exiting early')
-
- pool.haltWork()
- pool.joinWorkers()
-
-
-def reset_repair_global_sequence(datadir):
- """reset gp_persistent_repair_global_sequenece after the activate standby"""
- pool = WorkerPool()
- cmd = GpAddConfigScript(unix.getLocalHostname(), datadir, 'gp_persistent_repair_global_sequence', None, True)
- pool.addCommand(cmd)
- try:
- pool.join()
- items = pool.getCompletedItems()
- failure = False
- for i in items:
- if not i.was_successful():
- logger.error('failed updating the postgresql.conf files on host: ' + i.remoteHost)
- failure = True
-
- pool.check_results()
- except Exception, e:
- logger.error('errors in job:')
- logger.error(e.__str__())
- logger.error('exiting early')
-
- pool.haltWork()
- pool.joinWorkers()
-
-
-#-------------------------------------------------------------------------
-def update_config():
- """Updates the configuration information in the catalog."""
-
- dburl = dbconn.DbURL()
- conn = dbconn.connect(dburl, utility=True)
-
- logger.info('Updating catalog...')
- sql = "SELECT gp_activate_standby()"
- dbconn.execSQL(conn, sql)
-
- conn.commit()
- conn.close()
-
- logger.info('Database catalog updated successful')
-
-#-------------------------------------------------------------------------
-def update_gpdbid_file(array):
- """Updates to gp_dbid file in the data directory to reflect the standby masters dbid."""
-
- standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())
-
- # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace
- writeGpDbidFile(standby_datadir, 1, get_logger_if_verbose())
-
-
-#-------------------------------------------------------------------------
-def create_new_standby_master(options):
- """Creates a new standby master."""
-
- logger.info('Creating new standby master...')
-
- gphome = os.environ.get("GPHOME")
- # we have to use os.system here because gpinitstandby will be interactive
- # due to filespace remapping.
- rc = os.system("%s/bin/gpinitstandby -s %s" % (gphome, options.new_standby))
- if rc != 0:
- logger.warning('Failed to create the new standby master on %s' % options.new_standby)
- logger.warning('You will need to manually run \'gpinitstandby -s %s\' to create the new standby master.' % options.new_standby)
- raise GpActivateStandbyException('Failed to create new standby')
-
-
-#-------------------------------------------------------------------------
-def check_gpsync_running(options):
- """Checks if the gpsyncmaster process is running."""
-
- return gp.getSyncmasterPID('localhost', options.master_data_dir) > 0
-
-
-#-------------------------------------------------------------------------
-def stop_gpsync_process(options):
- """Stops the gpsyncmaster process."""
-
- logger.info('Stopping gpsync process...')
-
- # check to see if the gpsyncmaster process is active
- pid = gp.getSyncmasterPID('localhost', options.master_data_dir)
-
- if not pid > 0:
- # gpsyncmaster is not running so check if the force option was given.
- if options.force:
- # DbStatus only uses data directory so we can ignore the other values
- db = gparray.GpDB(None, None, None, None, None, None, None, None, None, options.master_data_dir, None)
- # check that postmaster isn't already running
- if pg.DbStatus.local('check db status', db) == True:
- logger.error('Located a postgres process on this host')
- logger.error('Has the master standby instance already been activated?')
- logger.error('Run the gpstate utility to check.')
- logger.error('Possible standby master instance active')
- raise GpActivateStandbyException('postgres process already running')
- else:
- gp.SegmentStop.local('stopping gpsyncmaster', options.master_data_dir, mode='fast')
- if unix.check_pid(pid):
- # not able to stop normally so give it a go with immediate mode
- logger.warning('Process gpsyncmaster still running, will issue fast shutdown with immediate')
- gp.SegmentStop.local('stopping gpsyncmaster', options.master_data_dir, mode='immediate')
-
- if unix.check_pid(pid):
- logger.error('Unable to stop sync process')
- raise GpActivateStandbyException('Unable to stop sync process')
- else:
- logger.info('Successfully shutdown sync process.')
- else:
- logger.info('Successfully shutdown sync process')
-
-
-#-------------------------------------------------------------------------
-def start_master():
- """Starts the master."""
-
- logger.info('Starting standby master database in utility mode...')
- gp.GpStart.local('Start GPDB', masterOnly=True)
-
-
-#-------------------------------------------------------------------------
-def stop_master():
- """Stops the master."""
-
- logger.info('Stopping standby master...')
- gp.GpStop.local('Stop GPDB', masterOnly=True, fast=True)
-
-
-#-------------------------------------------------------------------------
-def start_database():
- """Starts the database."""
-
- logger.info('Starting database in production mode...')
- gp.GpStart.local('Start database in production mode')
-
-#-------------------------------------------------------------------------
-def stop_database():
- """Stops the database."""
-
- logger.info('Stopping database...')
- gp.GpStop.local('Stopping database')
-
-
-
-#-------------------------------------------------------------------------
-# Main
-#-------------------------------------------------------------------------
-
-# setup logging
-logger = get_default_logger()
-setup_tool_logging(EXECNAME, unix.getLocalHostname(), unix.getUserName())
-
-# parse args and options
-(options_, args_) = parseargs()
-
-# if we got a new log dir, we can now set it up.
-if options_.logfile:
- setup_tool_logging(EXECNAME, unix.getLocalHostname(), unix.getUserName(), logdir=options_.logfile)
-
-try:
- (warnings_errors_, last_entry_time_) = check_standby_master_activation(options_)
-
- warnings_generated_ = print_summary(options_, warnings_errors_, last_entry_time_)
-
- # disable keyboard interrupt to prevent users from canceling
- # out of the process at a very bad time. If there is a partial
- # update to the gp_configuration catalog and the user cancels
- # you get stuck where you can't go forward and you can't go
- # backwards.
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- stop_gpsync_process(options_)
- # Prevent the global_sequence regression happened.
- set_repair_global_sequence(options_.master_data_dir)
- start_master()
- array_ = get_config()
- reset_repair_global_sequence(options_.master_data_dir)
- update_config()
- update_gpdbid_file(array_)
- update_flat_file(array_, GP_TRANSACTION_FILES_FILESPACE)
- update_flat_file(array_, GP_TEMPORARY_FILES_FILESPACE)
-
- # This should be stop_master, but due to filerep issue (MPP-9559)
- # we need to stop all the segments too.
- # Catch the exception that can be thrown if some segments are already down.
- try:
- stop_database()
- except Exception, e_:
- logger.info('Exception observed while stopping database')
- logger.info(str(e_))
-
- start_database()
-
- # At this point, cancel isn't all that bad so re-enable
- # keyboard interrupt. They may cancel out of the creating
- # of a new standby, but gpinitstandby can be used to
- # create one at a later point.
- signal.signal(signal.SIGINT, signal.default_int_handler)
-
- if options_.new_standby:
- create_new_standby_master(options_)
-
- print_results(array_, unix.getLocalHostname(), options_)
-
- if warnings_generated_:
- sys.exit(1)
- else:
- sys.exit(0)
-
-except Exception, e_:
- logger.fatal('Error activating standby master: %s' % str(e_))
- sys.exit(2)
-
-
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/gpinitstandby
----------------------------------------------------------------------
diff --git a/tools/bin/gpinitstandby b/tools/bin/gpinitstandby
deleted file mode 100755
index decd7a6..0000000
--- a/tools/bin/gpinitstandby
+++ /dev/null
@@ -1,1030 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
-#
-import os
-import sys
-import signal
-import shutil
-
-# import GPDB modules
-try:
- from gppylib.gpparseopts import *
- from gppylib.gplog import *
- from gppylib.commands import unix, gp, base
- from gppylib import gparray
- from gppylib.db import dbconn
- from gppylib.db import catalog
- from gppylib.userinput import *
- from gppylib.gp_dbid import GpDbidFile
- from gppylib.operations.package import SyncPackages
- from gppylib.operations.filespace import PG_SYSTEM_FILESPACE, GP_TRANSACTION_FILES_FILESPACE, GP_TEMPORARY_FILES_FILESPACE, GetFilespaceEntries, GetFilespaceEntriesDict, MoveFilespaceError, create_temporary_directories, remove_temporary_directories
-except ImportError, e:
- sys.exit('ERROR: Cannot import modules. Please check that you '
- 'have sourced greenplum_path.sh. Detail: ' + str(e))
-
-EXECNAME = os.path.split(__file__)[-1]
-
-# initstandby state constants for rollback
-INIT_STANDBY_STATE_NOT_STARTED=0
-INIT_STANDBY_STATE_UPDATE_CATALOG=1
-INIT_STANDBY_STATE_COPY_FILES=2
-INIT_STANDBY_STATE_UPDATE_GPDBID=3
-INIT_STANDBY_STATE_DONE=4
-
-g_init_standby_state=INIT_STANDBY_STATE_NOT_STARTED
-
-
-# default batch size
-DEFAULT_BATCH_SIZE=16
-
-# backup filename
-PG_HBA_BACKUP = 'pg_hba.conf.gpinitstandby.bak'
-
-_description = """The gpinitstandby utility adds a backup master host to your
-Greenplum Database system. If your system has an existing backup
-master host configured, use the -r option to remove it before adding
-the new standby master host.
-
-Before running this utility, make sure
-that the Greenplum Database software is installed on the backup master
-host and that you have exchanged SSH keys between hosts. Also make sure
-that the master port is set to the same port number on the master host
-and the backup master host. This utility should be run on the currently
-active primary master host.
-
-The utility will perform the following steps:
-* Shutdown your Greenplum Database system
-* Update the Greenplum Database system catalog to remove the
- existing backup master host information (if the -r option is supplied)
-* Update the Greenplum Database system catalog to add the new backup
- master host information (use the -n option to skip this step)
-* Edit the pg_hba.conf files of the segment instances to allow access
- from the newly added standby master.
-* Setup the backup master instance on the alternate master host
-* Start the synchronization process
-* Restart your Greenplum Database system
-
-A backup master host serves as a 'warm standby' in the event of the
-primary master host becoming nonoperational. The backup master is kept
-up to date by a transaction log replication process (gpsyncmaster),
-which runs on the backup master host and keeps the data between the
-primary and backup master hosts synchronized. If the primary master
-fails, the log replication process is shutdown, and the backup master
-can be activated in its place by using the gpactivatestandby utility.
-Upon activation of the backup master, the replicated logs are used to
-reconstruct the state of the master host at the time of the last
-successfully committed transaction.
-"""
-
-_usage = """
-"""
-
-class GpInitStandbyException(Exception):
- pass
-
-
-#-------------------------------------------------------------------------
-def parseargs():
- """parses and validates command line args."""
-
- parser = OptParser(option_class=OptChecker,
- version='%prog version $Revision$')
-
- parser.setHelp([])
- parser.remove_option('-h')
-
- # General options section
- optgrp = OptionGroup(parser, 'General options')
- optgrp.add_option('-?', '--help', dest='help', action='store_true',
- help='display this help message and exit')
- optgrp.add_option('-v', dest='version', action='store_true',
- help='display version information and exit')
- parser.add_option_group(optgrp)
-
- # Logging options section
- optgrp = OptionGroup(parser, 'Logging options')
- optgrp.add_option('-q', '--quiet', action='store_true',
- help='quiet mode, do not log progress to screen')
- optgrp.add_option('-l', '--logfile', type='string', default=None,
- help='alternative logfile directory')
- optgrp.add_option('-a', help='don\'t ask to confirm standby master activation',
- dest='confirm', default=True, action='store_false')
- optgrp.add_option('-D', '--debug', action='store_true', default=False,
- help='enable debug logging')
- parser.add_option_group(optgrp)
-
- # Standby initialization options section
- optgrp = OptionGroup(parser, 'Standby initialization options')
- optgrp.add_option('-s', '--standby-host', type='string', dest='standby_host',
- help='hostname of system to create standby master on')
- optgrp.add_option('-n', '--no-update', action='store_true', dest='no_update',
- help='do not update system catalog tables')
- optgrp.add_option('-r', '--remove', action='store_true',
- help='remove current warm master standby. Use this option '
- 'if the warm master standby host has failed. This option will '
- 'need to shutdown the GPDB array to be able to complete the request')
- optgrp.add_option('-M', '--mode', type='string', default='smart',
- help='use specified mode when stopping the GPDB array. Default: smart')
- optgrp.add_option('-L', '--no-restart', dest='no_restart', default=False, action='store_true',
- help='leave the GPDB array in a stopped state after removing the warm standby master')
- parser.add_option_group(optgrp)
-
-
- # Parse the command line arguments
- (options, args) = parser.parse_args()
-
- if options.help:
- parser.print_help()
- parser.exit(0, None)
-
- if options.version:
- parser.print_version()
- parser.exit(0, None)
-
- if options.logfile and not os.path.exists(options.logfile):
- logger.error('Log directory %s does not exist.' % options.logfile)
- parser.exit(2, None)
-
- # -s and -n are exclusive
- if options.standby_host and options.no_update:
- logger.error('Options -s and -n cannot be specified together.')
- parser.exit(2, None)
-
- # -s and -r are exclusive
- if options.standby_host and options.remove:
- logger.error('Options -s and -r cannot be specified together.')
- parser.exit(2, None)
-
- # -L and -s are exclusive
- if options.standby_host and options.no_restart:
- logger.error('Options -s and -L cannot be specified together.')
- parser.exit(2, None)
-
- # we either need to delete or create or sync
- if not options.remove and not options.standby_host and not options.no_update:
- logger.error('No action provided in the options.')
- parser.print_help()
- parser.exit(2, None)
-
- # check that new standby host is up
- if options.standby_host:
- try:
- gp.Ping.local('check new standby up', options.standby_host)
- except:
- logger.error('Unable to ping new standby host %s' % options.standby_host)
- parser.exit(2, None)
-
- # make sure we aren't trying to create a standby on this host
- if options.standby_host and options.standby_host == unix.getLocalHostname():
- logger.error('Cannot run this script on the standby master host')
- parser.exit(2, None)
-
- return options, args
-
-
-#-------------------------------------------------------------------------
-def print_summary(options, array, standby_filespace_map):
- """Display summary of gpinitstandby operations."""
-
- logger.info('-----------------------------------------------------')
- if options.remove:
- logger.info('Warm master standby removal parameters')
- else:
- logger.info('Greenplum standby master initialization parameters')
- logger.info('-----------------------------------------------------')
- logger.info('Greenplum master hostname = %s' \
- % array.master.getSegmentHostName())
- logger.info('Greenplum master data directory = %s' \
- % array.master.getSegmentDataDirectory())
- logger.info('Greenplum master port = %s' \
- % array.master.getSegmentPort())
- if options.remove:
- logger.info('Greenplum standby master hostname = %s' \
- % array.standbyMaster.getSegmentHostName())
- else:
- logger.info('Greenplum standby master hostname = %s' \
- % options.standby_host)
- logger.info('Greenplum standby master port = %s' \
- % array.master.getSegmentPort())
- if not array.standbyMaster:
- pg_system = None
- for fs in standby_filespace_map:
- if fs[0] == 'pg_system':
- pg_system = fs[1]
- break
- if pg_system:
- logger.info('Greenplum standby master data directory = %s' % pg_system)
- else:
- GpInitStandbyException('Failed to find pg_system '
- 'filespace for standby master')
- else:
- logger.info('Greenplum standby master data directory = %s' \
- % array.standbyMaster.getSegmentDataDirectory())
- if not options.remove and options.no_update:
- logger.info('Greenplum update system catalog = Off')
- elif not options.remove:
- logger.info('Greenplum update system catalog = On')
- logger.info('Greenplum stop database mode = %s' % options.mode)
- if options.remove and options.no_restart:
- logger.info('Restart Greenplum database after delete = No')
- elif options.remove:
- logger.info('Restart Greenplum database after delete = Yes')
-
- if not options.remove and standby_filespace_map:
- logger.info('-----------------------------------------------------')
- logger.info(' Filespace locations')
- logger.info('-----------------------------------------------------')
- for item in standby_filespace_map:
- logger.info('%s -> %s' % (item[0], item[1]))
-
- # Confirm the action
- if options.confirm:
- if options.remove:
- yn = ask_yesno(None, 'Do you want to continue with deleting '
- 'the standby master?', 'N')
- else:
- yn = ask_yesno(None, 'Do you want to continue with '
- 'standby master initialization?', 'N')
- if not yn:
- raise GpInitStandbyException('User canceled')
-
-
-#-------------------------------------------------------------------------
-def stop_database(options):
- """Stops the database."""
-
- try:
- logger.info('Stopping database...')
- if options.mode == 'fast':
- gp.GpStop.local('Stop GPDB', fast=True)
- else:
- gp.GpStop.local('Stop GPDB')
- except Exception, ex:
- logger.error('Failed to stop the database.')
- raise GpInitStandbyException(ex)
-
-def getDbUrlForInitStandby():
- """
- Return the dbconn.DbURL instance that should be used for connecting
- """
-
- #
- # use template0 to avoid using PGDATABASE value (which definitely won't work during initsystem)
- #
- return dbconn.DbURL(dbname="template0")
-
-#-------------------------------------------------------------------------
-def start_database():
- """Starts the database."""
-
- logger.info('Starting database in production mode...')
- try:
- gp.GpStart.local('Start database in production mode')
- except Exception, ex:
- logger.error('Failed to start the database')
- raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def stop_master():
- """Stops the master only."""
-
- logger.info('Stopping master...')
- try:
- gp.GpStop.local('Stop GPDB', masterOnly=True, fast=True)
- except Exception, ex:
- logger.error('Failed to stop the master.')
- raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def start_master():
- """Starts the master in utility mode."""
-
- logger.info('Starting master in utility mode...')
- try:
- gp.GpStart.local('Start GPDB', masterOnly=True)
- except Exception, ex:
- logger.error('Failed to start the master.')
- raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def delete_standby(options):
- """Removes the standby master."""
- try:
- dburl = getDbUrlForInitStandby()
- array = gparray.GpArray.initFromCatalog(dburl, utility=True)
- except:
- logger.error('Failed to retrieve configuration information from the master.')
- raise
-
- # make sure we have a standby to delete
- if not array.standbyMaster:
- logger.error('Request made to remove warm master standby, '
- 'but no standby located.')
- raise GpInitStandbyException('no standby configured')
-
- print_summary(options, array, None)
-
- # Disable Ctrl-C
- signal.signal(signal.SIGINT,signal.SIG_IGN)
-
- stop_database(options)
-
- try:
- remove_standby_from_catalog(options, array)
- except Exception, ex:
- logger.error('Failed to remove standby master from catalog.')
- raise GpInitStandbyException(ex)
-
- #repopulate flat file
- pg_system_fs_entries = GetFilespaceEntriesDict(GetFilespaceEntries(array, PG_SYSTEM_FILESPACE).run()).run()
- flat_file_location = os.path.join(pg_system_fs_entries[1][2], GP_TRANSACTION_FILES_FILESPACE)
- remove_standby_from_flat_file(flat_file_location, GP_TRANSACTION_FILES_FILESPACE, array.standbyMaster)
- flat_file_location = os.path.join(pg_system_fs_entries[1][2], GP_TEMPORARY_FILES_FILESPACE)
- remove_standby_from_flat_file(flat_file_location, GP_TEMPORARY_FILES_FILESPACE, array.standbyMaster)
-
- if not options.no_restart:
- start_database()
-
- # check if syncmaster running on standby
- try:
- gpsyncmaster_pid = gp.getSyncmasterPID(array.standbyMaster.getSegmentHostName(),
- array.standbyMaster.getSegmentDataDirectory())
- if gpsyncmaster_pid > 0:
- # stop it
- logger.info('Stopping gpsyncmaster on %s' %
- array.standbyMaster.getSegmentHostName())
- gp.SegmentStop.remote('stop gpsyncmaster',
- array.standbyMaster.getSegmentHostName(),
- array.standbyMaster.getSegmentDataDirectory())
- except Exception, ex:
- logger.error('Failed to stop gpsyncmaster process on standby master.')
- raise GpInitStandbyException(ex)
-
- # delete temporary directories
- remove_temporary_directories(array.standbyMaster.getSegmentHostName(),
- array.standbyMaster.getSegmentDataDirectory())
-
- # delete directory
- remove_standby_filespace_dirs(array)
-
- # Reenable Ctrl-C
- signal.signal(signal.SIGINT,signal.default_int_handler)
-
-#-------------------------------------------------------------------------
-def remove_standby_filespace_dirs(array):
- """Removes the filespace directories on the standby master."""
-
- if array.standbyMaster:
- logger.info('Removing filespace directories on standby master...')
-
- fs_dirs = array.standbyMaster.getSegmentFilespaces().values()
-
- pool = base.WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
-
- for fs_dir in fs_dirs:
- cmd = unix.RemoveFiles('delete standby filespace dir',
- fs_dir, ctxt=base.REMOTE,
- remoteHost=array.standbyMaster.getSegmentHostName())
- pool.addCommand(cmd)
-
- pool.join()
- try:
- pool.check_results()
- except Exception, ex:
- logger.error('Failed to remove filespace directories on standby master.')
- raise GpInitStandbyException(ex)
- finally:
- pool.haltWork()
-
-
-
-#-------------------------------------------------------------------------
-def create_standby(options):
- """Creates the standby master."""
-
- global g_init_standby_state
-
- master_filespace_map = None
- standby_filespace_map = None
- array = None
- conn = None
-
- # The mode the master was in when we started
- # This is needed because when gpinitstandby is
- # called by gpinitsystem the db is in master only
- # mode and needs to remain that way.
- master_mode='production'
-
- try:
- try:
- dburl = getDbUrlForInitStandby()
- array = gparray.GpArray.initFromCatalog(dburl, utility=True)
-
- # get list of master filespaces for later
- conn = dbconn.connect(dburl, utility=True)
- master_filespace_map = catalog.get_master_filespace_map(conn)
-
- # get standby filespace map
- if not options.no_update:
- # create new standby
- standby_filespace_map = get_filespace_mappings(array, master_filespace_map)
- else:
- standby_filespace_map = catalog.get_standby_filespace_map(conn)
- except Exception, ex:
- logger.error('Failed to retrieve configuration information from the master.')
- raise GpInitStandbyException(ex)
- finally:
- if conn:
- conn.close()
-
- # Get the mode the master is in so we can restore it to that mode
- cmd = unix.FileContainsTerm('check mode', 'gp_role=utility',
- array.master.getSegmentDataDirectory() + '/postmaster.opts')
- cmd.run(validateAfter=False)
-
- if cmd.contains_term():
- master_mode = 'utility'
- else:
- master_mode = 'production'
-
- # validate
- validate_standby_init(options, array, standby_filespace_map)
-
- # display summary
- print_summary(options, array, standby_filespace_map)
-
- # GPSQL does not support gppkg and package, and there is known issue for GPSQL rpm build:
- # MPP-15568 and GPSQL-99.
- #
- # sync packages
- # The design decision here is to squash any exceptions resulting from the
- # synchronization of packages. We should *not* disturb the user's attempts
- # initialize a standby.
- # try:
- # logger.info('Syncing Greenplum Database extensions to standby')
- # SyncPackages(options.standby_host).run()
- # except Exception, e:
- # logger.exception('Syncing of Greenplum Database extensions has failed.')
- # logger.warning('Please run gppkg --clean after successful standby initialization.')
-
- # Disable Ctrl-C
- signal.signal(signal.SIGINT,signal.SIG_IGN)
-
- if master_mode == 'utility':
- stop_master()
- else:
- stop_database(options)
-
- # update the catalog if needed
- if not options.no_update:
- update_pg_hba_conf(options, array)
- array = add_standby_to_catalog(options,
- standby_filespace_map)
- else:
- logger.info('-n option given, skipping catalog update')
-
- copy_master_filespaces_to_standby(options, array,
- master_filespace_map,
- standby_filespace_map)
- update_gpdbid_file(options, array)
- update_flat_file(array, standby_filespace_map, GP_TRANSACTION_FILES_FILESPACE)
- update_flat_file(array, standby_filespace_map, GP_TEMPORARY_FILES_FILESPACE)
- # no need to recreate the temporaries directory
- if not options.no_update:
- create_temporary_directories(array.standbyMaster.getSegmentHostName(),
- array.standbyMaster.getSegmentDataDirectory())
- cleanup_pg_hba_conf_backup(array)
-
- if master_mode == 'utility':
- start_master()
- else:
- start_database()
-
- # Reenable Ctrl-C
- signal.signal(signal.SIGINT,signal.default_int_handler)
- except Exception, ex:
- # Something went wrong. Based on the current state, we can rollback
- # the operation.
- logger.info('Trying to rollback changes that have been made...')
- if g_init_standby_state == INIT_STANDBY_STATE_NOT_STARTED:
- # nothing to rollback
- pass
- elif g_init_standby_state == INIT_STANDBY_STATE_UPDATE_CATALOG:
- undo_catalog_update(options, array)
- elif g_init_standby_state == INIT_STANDBY_STATE_COPY_FILES or \
- g_init_standby_state == INIT_STANDBY_STATE_UPDATE_GPDBID:
- undo_update_pg_hba_conf(array)
- undo_catalog_update(options, array)
- undo_file_copy(options, array)
- # at this point we are back at the original state so we'll start up
- # the database.
- if g_init_standby_state != INIT_STANDBY_STATE_NOT_STARTED:
- start_database()
- raise GpInitStandbyException(ex)
-
-
-#-------------------------------------------------------------------------
-def update_pg_hba_conf(options, array):
- """Updates the pg_hba.conf file to include the ip addresses of the
- standby master."""
-
- logger.info('Updating pg_hba.conf file...')
- try:
- master_data_dir = array.master.getSegmentDataDirectory()
- standby_ips = unix.InterfaceAddrs.remote('get standby ips', options.standby_host)
- current_user = unix.UserId.local('get userid')
-
- # back it up
- os.system('cp %s/pg_hba.conf %s/%s' \
- % (master_data_dir, master_data_dir, PG_HBA_BACKUP))
-
- # read in current pg_hba.conf file
- fp = open(master_data_dir + '/pg_hba.conf', 'r')
- pg_hba_conf = fp.readlines()
- fp.close()
-
- # Find where the comments stop
- index = 0
- while pg_hba_conf[index].strip().startswith('#'):
- index += 1
-
- new_section = ['# standby master host ip addresses\n']
- for ip in standby_ips:
- cidr_suffix = '/128' if ':' in ip else '/32' # MPP-15889
- new_section.append('host\tall\t%s\t%s%s\ttrust\n' % (current_user, ip, cidr_suffix))
-
- # insert new section
- pg_hba_conf[index:index] = new_section
-
- # write it out
- fp = open(array.master.getSegmentDataDirectory() + '/pg_hba.conf', 'w')
- fp.writelines(pg_hba_conf)
- fp.close()
-
- except Exception, ex:
- logger.error('Failed to update pg_hba.conf file on master.')
- raise GpInitStandbyException(ex)
-
-
-#-------------------------------------------------------------------------
-def cleanup_pg_hba_conf_backup(array):
- """Removes the pg_hba.conf backup."""
-
- logger.info('Removing pg_hba.conf backup...')
- master_data_dir = array.master.getSegmentDataDirectory()
- standby_data_dir = array.standbyMaster.getSegmentDataDirectory()
-
- try:
- unix.RemoveFiles.local('cleanup master pg_hba.conf backup', '%s/%s' % (master_data_dir, PG_HBA_BACKUP))
- unix.RemoveFiles.remote('cleanup standby pg_hba.conf backup',
- array.standbyMaster.getSegmentHostName(),
- '%s/%s' % (standby_data_dir, PG_HBA_BACKUP))
- except:
- # ignore...
- pass
-
-
-#-------------------------------------------------------------------------
-def validate_standby_init(options, array, standby_filespace_map):
- """Validates the parameters and environment."""
-
- logger.info('Validating environment and parameters for standby initialization...')
- if array.standbyMaster and not options.no_update:
- logger.error('Standby master already configured')
- logger.info('If you want to resync the standby master, use the -n option')
- raise GpInitStandbyException('standby master already configured')
-
- if options.no_update:
- if array.standbyMaster:
- options.standby_host = array.standbyMaster.getSegmentHostName()
- else:
- logger.error('Cannot use -n option when standby master has not yet been configured')
- raise GpInitStandbyException('Standby master not configured')
-
- # make sure we have top level dirs
- for fs_name, fs_dir in standby_filespace_map:
- base_dir = os.path.dirname(os.path.normpath(fs_dir))
- # In GPSQL, user should not maintain the master local path anymore.
- unix.MakeDirectory.remote('make dir for ' + str(fs_name), options.standby_host, base_dir)
-
- if not unix.FileDirExists.remote('check for filespace dir',
- options.standby_host,
- base_dir):
- logger.error('Parent directory %s does not exist on host %s' %(base_dir, options.standby_host))
- logger.error('This directory must be created before running gpactivatestandby')
- raise GpInitStandbyException('Parent directory %s does not exist' % base_dir)
-
- # check that master data dir does not exist on new host unless we are just re-syncing
- logger.info('Checking for filespace directory %s on %s' % (fs_dir, options.standby_host))
- if not options.no_update and unix.FileDirExists.remote('check for filespace dir', options.standby_host,
- fs_dir):
- logger.error('Filespace directory already exists on host %s' % options.standby_host)
- if array.standbyMaster:
- logger.error('If you want to just sync the data directory, use the -n option')
- raise GpInitStandbyException('master data directory exists')
-
-
-#-------------------------------------------------------------------------
-def get_add_standby_sql(hostname, address, filespaces):
- """Returns the SQL for adding a standby master."""
-
- sql = "select gp_add_master_standby('%s', '%s', '%s')" % (hostname,
- address,
- filespaces[0][1])
- return sql
-
-
-#-------------------------------------------------------------------------
-def get_remove_standby_sql():
- """Returns the SQL for removing a standby master."""
-
- sql = "select gp_remove_master_standby()"
- return sql
-
-
-#-------------------------------------------------------------------------
-def filespace_map_to_string(filespace_map):
- """Converts the filespace map into a postgres array string."""
-
- filespace_map_str = "{"
- for item in filespace_map:
- filespace_map_str += '{"%s","%s"},' % (item[0], item[1])
- filespace_map_str = filespace_map_str.rstrip(',') + "}"
- return filespace_map_str
-
-
-#-------------------------------------------------------------------------
-def get_filespace_mappings(array, master_filespace_map):
- """Asks user for the mapping from master filespaces -> standby master.
- master_filespace_map should be a 2d array of:
- [ ['master_fs1name', 'master_fs1path'],
- ['master_fs2name', 'master_fs2path'],
- ...
- ['master_fsnname', 'master_fsnpath'] ]"""
-
- standby_filespace_map = []
- tmp_validator = lambda str, default, ignore1: str if str and str != '' else default
-
- if len(master_filespace_map) > 1:
- print """The filespace locations on the master must be mapped to
-locations on the standby. These locations must be empty on the
-standby master host. The default provided is the location of
-the filespace on the master. In most cases the defaults can be
-used. The exception is the pg_system filespace which must be in
-the same location on both the master and standby master.
- """
-
- for item in master_filespace_map:
- if item[0] != 'pg_system':
- continue
-
- fs_loc = item[1]
- # fs_loc = ask_input(None,'Enter standby filespace location for filespace %s (default: %s)' % (item[0], item[1]), '',
- # item[1], tmp_validator, None)
- if not os.path.isabs(fs_loc):
- raise GpInitStandbyException('Filespace paths must be absolute paths. %s is a relative path.' % fs_loc)
- standby_filespace_map.append([item[0], fs_loc])
-
- return standby_filespace_map
-
-#-------------------------------------------------------------------------
-def add_standby_to_catalog(options, standby_filespace_map):
- """Adds the standby to the catalog."""
-
- global g_init_standby_state
-
- try:
- g_init_standby_state=INIT_STANDBY_STATE_UPDATE_CATALOG
- start_master()
- dburl = getDbUrlForInitStandby()
- conn = dbconn.connect(dburl, utility=True)
-
- logger.info('Adding standby master to catalog...')
-
- sql = get_add_standby_sql(options.standby_host, options.standby_host,
- standby_filespace_map)
-
- dbconn.execSQL(conn, sql)
- conn.commit()
- conn.close()
- logger.info('Database catalog updated successfully.')
- array = gparray.GpArray.initFromCatalog(dburl, utility=True)
- stop_master()
-
- # MPP-13245, store the new standby_dbid in the gp_dbid file
- d = GpDbidFile( array.master.getSegmentDataDirectory(), do_read=True, logger=get_logger_if_verbose() )
- d.standby_dbid = int(array.standbyMaster.getSegmentDbId())
- d.write_gp_dbid()
-
- return array
- except Exception, ex:
- logger.error('Failed to add standby to master catalog.')
- raise GpInitStandbyException(ex)
-
-
-#-------------------------------------------------------------------------
-def remove_standby_from_catalog(options, array):
- """Removes the standby from the catalog."""
- # update catalog
- try:
- start_master()
- dburl = getDbUrlForInitStandby()
- conn = dbconn.connect(dburl, utility=True)
-
- logger.info('Removing standby master from catalog...')
- sql = get_remove_standby_sql()
-
- dbconn.execSQL(conn, sql)
- conn.commit()
- conn.close()
-
- logger.info('Database catalog updated successfully.')
- stop_master()
-
- # MPP-13245, remove the standby_dbid from the gp_dbid file
- d = GpDbidFile( array.master.getSegmentDataDirectory(), do_read=True, logger=get_logger_if_verbose() )
- d.standby_dbid = None
- d.write_gp_dbid()
-
- except Exception, ex:
- logger.error('Failed to remove standby from master catalog.')
- stop_master()
- raise GpInitStandbyException(ex)
-
-
-#-------------------------------------------------------------------------
-def copy_master_filespaces_to_standby(options, array, master_filespace_map, standby_filespace_map):
- """Copies the filespaces from the master to the standby according to
- the maps provided."""
-
- global g_init_standby_state
-
- g_init_standby_state=INIT_STANDBY_STATE_COPY_FILES
- #maps -> dicts
- master_fs_dict = {}
- standby_fs_dict = {}
- for i in master_filespace_map:
- master_fs_dict[i[0]] = i[1]
- for i in standby_filespace_map:
- standby_fs_dict[i[0]] = i[1]
-
- # The worker pool for the copies
- pool = base.WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
-
- # need to make sure file spaces are sync'd
- for fs_name, fs_dir in master_fs_dict.iteritems():
- cwd = os.getcwd()
- os.chdir(fs_dir)
- logger.info('Forcing changed blocks to disk for filespace %s...' % fs_dir)
- os.system('sync')
- os.chdir(cwd)
-
- # resolve the mapping
- standby_fs_dir = standby_fs_dict[fs_name]
-
- # create the directory
- if not unix.FileDirExists.remote('check dir', options.standby_host, standby_fs_dir):
- logger.info('Filespace directory does not exist on %s' % options.standby_host)
- logger.info('Creating %s:%s' % (options.standby_host, standby_fs_dir))
- unix.MakeDirectory.remote('create dir', options.standby_host, standby_fs_dir)
- unix.Chmod.remote('chmod', options.standby_host, standby_fs_dir, "0700")
-
- # Do the copy using pysync
- logger.info('Copying filespace directory to %s' % options.standby_host)
-
- exclude_dirs = ['gpperfmon/data', 'pg_log', 'db_dumps']
- pysync_options = '-x ' + ' -x '.join(exclude_dirs)
-
- if options.debug:
- pysync_options = pysync_options + ' -v'
-
- cmd = gp.PySync('master data dir sync', fs_dir,
- options.standby_host, standby_fs_dir,
- options=pysync_options)
- pool.addCommand(cmd)
-
- pool.join()
- try:
- pool.check_results()
- except Exception, ex:
- logger.error('Failed to copy filespace directories from master to standby.')
- raise GpInitStandbyException(ex)
- finally:
- pool.haltWork()
-
-
-#-------------------------------------------------------------------------
-def update_gpdbid_file(options, array):
- """Updates the gp_dbid file on the standby master to reflect the correct dbid."""
- global g_init_standby_state
-
- g_init_standby_state = INIT_STANDBY_STATE_UPDATE_GPDBID
-
- standby_dbid = array.standbyMaster.getSegmentDbId()
- standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())
- try:
- # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace
- cmd = gp.GpCreateDBIdFile('update gp_dbid file',
- standby_datadir,
- standby_dbid,
- verbose=logging_is_verbose(),
- ctxt=base.REMOTE,
- remoteHost=options.standby_host)
-
- cmd.run(validateAfter=True)
- except Exception, ex:
- logger.error('Failed to update standby master\'s gp_dbid file.')
- raise GpInitStandbyException(ex)
-
-#-------------------------------------------------------------------------
-def write_temp_flat_file(flat_file_location, flat_file, array, standby_filespace_dict):
-
- standby_master = array.standbyMaster
- master = array.master
-
- temp_file = None
- if os.path.exists(flat_file_location):
- logger.info('Writing standby information to %s flat file' % flat_file_location)
- temp_file = flat_file + '.tmp'
- lines_to_write = ""
-
- #Read data
- with open(flat_file_location, 'r') as read_file:
- for line in read_file:
- fs_info = line.split()
- if len(fs_info) != 2:
- fs_oid = fs_info[0]
- lines_to_write += line.strip()
- else:
- fs_dir = fs_info[1]
- fs_info[0] = str(standby_master.getSegmentDbId())
- fs_info[1] = standby_filespace_dict[array.getFileSpaceName(int(fs_oid))]
- lines_to_write += ' '.join(fs_info)
-
- lines_to_write += '\n'
- #We now write the peer information
- lines_to_write +=( str(master.getSegmentDbId()) + ' ' + fs_dir + '\n' )
-
- #Write data
- with open(temp_file, 'w') as write_file:
- write_file.write(lines_to_write)
-
- #Check what we've written
- with open(temp_file) as file:
- contents = file.read()
- if contents != lines_to_write:
- raise MoveFilespaceError('Failed to write contents to flat file.')
-
- return temp_file
-
-#-------------------------------------------------------------------------
-def remove_standby_from_flat_file(flat_file_location, flat_file, standby_master):
-
- if os.path.exists(flat_file_location):
- logger.info('Removing standby entry from %s flat file' % flat_file)
- temp_file = flat_file + '.tmp'
- lines_to_write = ""
- with open(temp_file, 'w') as write_file:
- with open(flat_file_location, 'r') as read_file:
- for line in read_file:
- fs_info = line.split()
- if fs_info[0] == str(standby_master.getSegmentDbId()):
- continue
- else:
- lines_to_write += line
-
- write_file.write(lines_to_write)
- logger.debug('Wrote %s to %s' % (lines_to_write, temp_file))
-
- #Check what we've written
- with open(temp_file) as file:
- contents = file.read()
- if contents != lines_to_write:
- raise MoveFilespaceError('Failed to write contents to flat file.')
-
- shutil.move(temp_file, flat_file_location)
-
-#-------------------------------------------------------------------------
-def update_flat_file(array, standby_filespace_map, flat_file):
- """
- If the transaction/temporary filespaces have
- ever been moved, we need to update the flat file.
- The filespace directories are copied by the
- copy_master_filespaces method.
- """
-
- logger.info('Updating filespace flat files')
-
- standby_filespace_dict = {}
- for i in standby_filespace_map:
- standby_filespace_dict[i[0]] = i[1]
-
- pg_system_fs_entries = GetFilespaceEntriesDict(GetFilespaceEntries(array, PG_SYSTEM_FILESPACE).run()).run()
-
- flat_file_location = os.path.join(pg_system_fs_entries[1][2], flat_file)
- logger.debug('flat file location for transaction files = %s' % flat_file_location)
- #Copy over the updated flat file to the standby
- temp_flat_file = write_temp_flat_file(flat_file_location, flat_file,
- array, standby_filespace_dict)
-
- if temp_flat_file:
- cpCmd = unix.Scp('gpinitstandby updating flat file for transaction filespace',
- os.path.join(os.getcwd(), temp_flat_file),
- flat_file_location,
- dstHost=array.standbyMaster.getSegmentHostName()
- )
- cpCmd.run(validateAfter=True)
- logger.debug('results of scp = %s' % cpCmd.get_results())
-
- #Rewrite the master flat file to include the standby information
- shutil.move(temp_flat_file, flat_file_location)
-
-#-------------------------------------------------------------------------
-# Rollback functions
-#-------------------------------------------------------------------------
-
-def undo_catalog_update(options, array):
- """Undoes the catalog updates."""
-
- # See if we can connect to master
- conn = None
- try:
- dburl = getDbUrlForInitStandby()
- conn = dbconn.connect(dburl, utility=True)
- stop_master()
- except:
- pass
- finally:
- if conn:
- conn.close()
-
- try:
- remove_standby_from_catalog(options, array)
- except:
- # Can't undo because the update never occured. Ok to
- # ignore this exception and continue
- pass
-
-
-#-------------------------------------------------------------------------
-def undo_file_copy(options, array):
- """Undoes the filespace copy."""
-
- try:
- remove_standby_filespace_dirs(array)
- except Exception, ex:
- # Just log a warning here.
- logger.warn('There was an error trying to cleanup the filespace')
- logger.warn('directories on the standby host %s' % options.standby_host)
-
-
-#-------------------------------------------------------------------------
-def undo_update_pg_hba_conf(array):
- """Undoes the pg_hba.conf update."""
-
- logger.info('Backing up pg_hba.conf file...')
- master_data_dir = array.master.getSegmentDataDirectory()
- os.system('mv %s/%s %s/pg_hba.conf' % (master_data_dir, PG_HBA_BACKUP, master_data_dir))
-
-#-------------------------------------------------------------------------
-# Main
-#-------------------------------------------------------------------------
-try:
- # setup logging
- logger = get_default_logger()
-
- (options, args) = parseargs()
-
- setup_tool_logging(EXECNAME,unix.getLocalHostname(),unix.getUserName(),options.logfile)
-
- # Turn on debug logging if needed
- if options.debug:
- enable_verbose_logging()
- if options.quiet:
- quiet_stdout_logging()
-
- # Kick off the work
- if options.remove:
- delete_standby(options)
- logger.info('Successfully removed standby master')
- else:
- create_standby(options)
- if options.no_update:
- logger.info('Successfully syncronized standby master.')
- else:
- logger.info('Successfully created standby master on %s' % options.standby_host)
-
-except KeyboardInterrupt:
- logger.error('User canceled')
- sys.exit(2)
-except Exception, ex:
- if options.remove:
- logger.error('Error removing standby master: %s' % str(ex))
- else:
- logger.error('Error initializing standby master: %s' % str(ex))
- if options.debug:
- logger.exception(ex)
- sys.exit(2)
-
[2/4] incubator-hawq git commit: HAWQ-39. Remove below unused mgmt
scripts for hawq2.0:
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/gpseginstall
----------------------------------------------------------------------
diff --git a/tools/bin/gpseginstall b/tools/bin/gpseginstall
deleted file mode 100755
index a1f5bd4..0000000
--- a/tools/bin/gpseginstall
+++ /dev/null
@@ -1,973 +0,0 @@
-#!/usr/bin/env python
-'''
-USAGE: gpseginstall -f|--file HOSTFILE [-u|--user USER] [-g|--group GROUP] [-p|--password PASSWORD] [-c|--commands COMMAND_OPTIONS] [--verbose]
- where HOSTFILE lists all the hosts to install the software on
- where USER will be the user to install the software as. The default is gpadmin. (root only option)
- where GROUP will be the group to install the software as. The default is gpadmin.
- where PASSWORD will be set for the USER default is changeme (root only option)
-
- where COMMAND_OPTIONS list a subset of commands to be run from the list below. The default is all commands.
- u -- add user (root only)
- c -- chown software on master host (root only)
- s -- tar, zip and copy over software to machines in cluster
- p -- change passwords (root only)
- E -- exchange keys for root (root only)
- e -- exchange keys for user
- l -- check and fix user limits for new user -- requires software to be installed on cluster with command 's' or equivalent (root only)
- v -- verify software on remote machines
-'''
-
-import os, sys, re
-import subprocess, warnings, logging, tarfile
-warnings.simplefilter('ignore', DeprecationWarning)
-sys.path.append(sys.path[0] + '/lib')
-
-try:
- import paramiko, getpass, pexpect
- import gppylib.userinput
- from optparse import Option, OptionParser
- from gppylib.gpparseopts import OptParser, OptChecker
- from gppylib.gplog import get_default_logger, setup_tool_logging
- from gppylib.commands.unix import getLocalHostname, getUserName, SYSTEM
- from gppylib.commands.base import WorkerPool, Command, NakedExecutionPasswordMap, NakedExecutionInfo, NAKED
-except ImportError, e:
- sys.exit('Cannot import modules. Please check that you have sourced greenplum_path.sh. Detail: ' + str(e))
-
-EXECNAME = os.path.split(__file__)[-1]
-
-logger = get_default_logger()
-setup_tool_logging(EXECNAME,getLocalHostname(),getUserName())
-hosts = dict() # address to HostMapping object
-uniqueseghosts = dict() # hostname to UniqueHost object
-options = None
-passwordMap = None
-nullFile = logging.FileHandler('/dev/null')
-logging.getLogger('paramiko.transport').addHandler(nullFile)
-pool = None
-gphome = None
-installation_info = None
-commands = set() # selected commands
-rootCommands = set(['u', 'p', 'c', 's', 'e', 'E', 'l', 'v']) # valid command types for root
-nonRootCommands = set(['s', 'e', 'v']) # valid command types for non-root user
-defaultNonRootCommands = set(['s', 'v']) # commands to run by default for non-root user
-currentUser = None
-isLinux = False
-GPSSH_EXKEYS_TIMEOUT = 600
-
-class InstallationInfo:
- def __init__(self, link_name, binary_path, binary_dir_location, binary_dir_name):
- self.install_link_name = link_name # greenplum-db
- self.install_binary_path = binary_path # /data/release-4.0
- self.install_binary_dir_location = binary_dir_location # /data
- self.install_binary_dir_name = binary_dir_name # release-4.0
- self.install_md5 = None
- self.install_version_string = None
-
- def debugStr(self):
- return "link_name %s\nbinary_path %s\nbinary_dir_location %s\nbinary_dir_name %s" % (self.install_link_name, self.install_binary_path, self.install_binary_dir_location, self.install_binary_dir_name)
-
-class HostMapping:
- def __init__(self, address):
- self.address = address
- self.hostname = None
-
- def debugStr(self):
- return "address %s hostname %s" % (self.address, self.hostname)
-
-class UniqueHost:
- def __init__(self, address, hostname):
- self.address = address
- self.hostname = hostname
- self.userExists = False
-
- def debugStr(self):
- return "address %s hostname %s userExists %s" % (self.address, self.hostname, self.userExists)
-
-def cli_help():
- help_path = os.path.join(sys.path[0], '..', 'docs', 'cli_help', EXECNAME + '_help')
- f = None
- try:
- try:
- f = open(help_path);
- return f.read(-1)
- except:
- return ''
- finally:
- if f: f.close()
-
-
-def usage():
- print cli_help() or __doc__
-
-# True is an error
-def parseargs():
-
- global options, isLinux
-
- parser = OptParser(option_class=OptChecker)
- parser.remove_option('-h')
- parser.add_option('-h', '-?', '--help', action='store_true')
- parser.add_option('-f', '--file', type='string')
- parser.add_option('-u', '--user', type='string', default='gpadmin')
- parser.add_option('-g', '--group', type='string')
- parser.add_option('-p', '--password', type='string')
- parser.add_option('-c', '--commands', type='string')
- parser.add_option('--verbose', action='store_true')
- (options, args) = parser.parse_args()
-
- global gphome
- gphome = os.environ.get('GPHOME')
- if not gphome:
- logger.error("GPHOME not set")
- return True
-
- if SYSTEM.getName() != "linux" and SYSTEM.getName() != "sunos":
- logger.error("This utility is only supported on the linux and solaris operating system")
- return True
-
- if SYSTEM.getName() == "linux":
- isLinux = True
-
- if options.help:
- usage()
- return True
-
- if not options.file:
- logger.error("--file must be specified")
- return True
-
- if not os.path.exists(options.file):
- logger.error("file %s does not exist" % options.file)
- return True
-
- if options.user == "root":
- logger.error("can not install the software into the account of user root")
- return True
-
- global currentUser
- currentUser = getpass.getuser()
-
- if currentUser == "root":
- validCommands = rootCommands
- else:
- validCommands = nonRootCommands
-
- if options.user != currentUser:
- logger.error("--user option '%s' does not equal non-root user running this utility '%s'" % (options.user, currentUser))
- return True
-
- if not options.group:
- options.group = options.user
-
- global commands
- if options.commands and len(options.commands):
- for i in range(len(options.commands)):
- if options.commands[i] in validCommands:
- if options.commands[i] not in commands:
- commands.add(options.commands[i])
- else:
- logger.error("'%s' is not a valid command for user(%s)" % (options.commands[i], currentUser))
- return True
- else:
- if currentUser == "root":
- commands = rootCommands
- else:
- commands = defaultNonRootCommands
-
- location = os.path.abspath(gphome)
- if os.path.islink(location):
- link_name = os.path.split(location)[1]
- binary_path = os.path.realpath(location)
- else:
- link_name = None
- binary_path = location
-
- (binary_dir_location, binary_dir_name) = os.path.split(binary_path)
-
- global installation_info
- installation_info = InstallationInfo(link_name, binary_path, binary_dir_location, binary_dir_name)
- logger.info("Installation Info:\n" + installation_info.debugStr())
-
- return False
-
-# True is an error
-def readHostList():
-
- try:
- for line in open(options.file, "r"):
- hostname = line.strip()
- if not len(hostname):
- continue
- hosts[hostname] = HostMapping(hostname)
- except Exception, e:
- logger.error("Error while reading file: %s" % options.file)
- logger.error(e.__str__())
- return True
-
- if not len(hosts.keys()):
- logger.error("No hosts read from hostfile: %s" % options.file)
- return True
-
- return False
-
-# True is an error
-def discoverPasswordMap():
-
- logger.info("check cluster password access")
-
- global passwordMap
-
- try:
- passwordMap = NakedExecutionPasswordMap(hosts.keys())
- passwordMap.discover()
- except Exception, e:
- logger.error("could not successfully access all machines")
- msg = e.__str__()
- if msg:
- logger.error("trace: %s" % msg)
- return True
-
- if passwordMap.complete:
- return False
- else:
- return True
-
-# True is an error
-def dedupeHosts():
-
- logger.info("de-duplicate hostnames")
-
- masterHostName = None
-
- try:
- cmd = Command("master", "hostname")
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- if i.results.rc or i.results.halt or not i.results.completed:
- logger.error("error trying to obtain hostname on master %s" % i.results.stderr)
- return True
- masterHostName = i.results.stdout.strip()
- break
- except Exception, e:
- logger.error("exception trying to obtain hostname on master: %s" % e.__str__())
- return True
-
- if not masterHostName:
- logger.error("Could not find master hostname")
- return True
-
- logger.info("master hostname: %s" % masterHostName)
-
- try:
- for host in hosts.keys():
- cmd = Command(host, "hostname", NAKED, host, nakedExecutionInfo=NakedExecutionInfo(passwordMap))
- pool.addCommand(cmd)
-
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- address = i.remoteHost
- if i.results.rc or i.results.halt or not i.results.completed:
- logger.error("error obtaining information from host %s" % address)
- return True
- hostname = i.results.stdout[0].strip()
- hosts[address].hostname = hostname
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- global uniqueseghosts
- for k in hosts.keys():
- hostname = hosts[k].hostname
- address = hosts[k].address
-
- if hostname in uniqueseghosts:
- # Here we do a heuristic:
- # there are several interfaces to each host
- # and we don't want to pick a slow interface
- # it is very likely that a hostname with a '-' will represent a fast link
- # so try to pick an address that contains a '-'
- # for example choose sdw1-1 over vert1
- if (not re.search("-", uniqueseghosts[hostname].address)) and (re.search("-", address)):
- uniqueseghosts[hostname].address = address
- continue
-
- if masterHostName == hostname:
- continue
-
- uniqueseghosts[hostname] = UniqueHost(address, hostname)
-
- if options.verbose:
- for k in uniqueseghosts.keys():
- logger.info("segment hostname: %s (%s)" % (uniqueseghosts[k].address, k))
-
- return False
-
-# True is an error
-def checkUsernames():
-
- logger.info("check for user %s on cluster" % options.user)
-
- try:
- for k in uniqueseghosts.keys():
- uh = uniqueseghosts[k]
- cmd = Command(uh.address, "id %s" % options.user, NAKED, uh.address, nakedExecutionInfo=NakedExecutionInfo(passwordMap))
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- if not i.results.rc:
- address = i.remoteHost
- hostname = hosts[address].hostname
- uniqueseghosts[hostname].userExists = True
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- return False
-
-def getAddUserCommand():
- if isLinux:
- return "useradd -m %s" % options.user
- else:
- return "groupadd %s; useradd -d /export/home/%s -m -g %s -s /bin/bash %s" % (options.user, options.user, options.user, options.user)
-
-# True is an error
-def addUserIfRequired():
-
- ###################################################################################
- logger.info("add user %s on master" % options.user)
- try:
- cmd = Command("useradd", getAddUserCommand())
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems() # get the completed items but ignore them
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- try:
- cmd = Command("checkId", "id %s" % options.user)
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- if i.results.rc:
- logger.error("failed to add user %s to master host: %s" % (options.user, i.results.stderr))
- return True
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- ###################################################################################
- logger.info("add user %s on cluster" % options.user)
-
- failedToAddUser = set() # set of address's where useradd failed
- try:
- for k in uniqueseghosts.keys():
- uh = uniqueseghosts[k]
- if uh.userExists:
- continue
-
- cmd = Command(uh.address, getAddUserCommand(), NAKED, uh.address, nakedExecutionInfo=NakedExecutionInfo(passwordMap))
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- address = i.remoteHost
- if not i.results.rc:
- hostname = hosts[address].hostname
- uniqueseghosts[hostname].userExists = True
- else:
- logger.error("%s: %s" % (address, i.results.stderr))
- failedToAddUser.add(address)
-
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- if len(failedToAddUser):
- for h in failedToAddUser:
- logger.error("could not create user %s on host %s" % (options.user, h))
- return True
-
- return False
-
-# True is an error
-def getLocalSoftwareVersion():
-
- global installation_info
- cmdStr = "%s/bin/gpssh --version" % gphome
- try:
- cmd = Command("version", cmdStr)
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- if i.results.rc:
- logger.error("Failed to run command: %s" % cmd.cmdStr)
- logger.error(i.results.stderr)
- return True
-
- installation_info.install_version_string = i.results.stdout.strip()
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- return False
-
-# True is an error
-def simpleLocalCommand(cmdStr, checkError=True, verbose=True):
- if verbose:
- logger.info(cmdStr)
- try:
- cmd = Command("simpleLocalCommand", cmdStr)
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- if checkError:
- for i in items:
- if i.results.rc:
- logger.error("Failed to run command: %s" % cmd.cmdStr)
- logger.error(i.results.stderr)
- return True
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- return False
-
-# True is an error
-def simpleRemoteCommand(cmdStr, checkError=True, verbose=True):
-
- failures = set()
- if verbose:
- logger.info("remote command: %s" % cmdStr)
-
- try:
- for k in uniqueseghosts.keys():
- uh = uniqueseghosts[k]
- cmd = Command(uh.address, cmdStr, NAKED, uh.address, nakedExecutionInfo=NakedExecutionInfo(passwordMap))
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- if checkError:
- for i in items:
- if i.results.rc:
- logger.error("%s: %s" % (i.remoteHost, i.results.stderr))
- failures.add(i.remoteHost)
-
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- if len(failures):
- for h in failures:
- logger.error("error running command %s on host %s" % (cmdStr, h))
- return True
-
- return False
-
-
-# True is an error
-def chownMasterSoftware():
-
- ###################################################################################
- if installation_info.install_link_name:
-
- cmdStr = "chown -R %s:%s %s/%s" % (options.user, options.group, installation_info.install_binary_dir_location, installation_info.install_link_name)
- if (simpleLocalCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- cmdStr = "chown -R %s:%s %s" % (options.user, options.group, installation_info.install_binary_path)
- if (simpleLocalCommand(cmdStr, True)):
- return True
-
-
-# True is an error
-def changeUserPassword():
-
- if not options.password:
- password = gppylib.userinput.ask_create_password()
- if not password:
- logger.error("error obtaining password from user")
- return True
- else:
- password = options.password
- cmd = "%s/sbin/gpchangeuserpassword --user %s --password %s" % (gphome, options.user, password)
-
- logger.info("Changing system passwords ...")
- if (simpleLocalCommand(cmd, False, verbose=False)):
- return True
-
- cmdStr = ". %s/greenplum_path.sh; %s" % (gphome, cmd)
-
- if (simpleRemoteCommand(cmdStr, True, verbose=False)):
- return True
-
-def md5Command():
- if isLinux:
- return "md5sum"
- else:
- return "digest -a md5"
-
-def md5OutputWords():
- if isLinux:
- return 2
- else:
- return 1
-
-
-# True is an error
-def setMd5Locally():
-
- try:
- cmd = Command("md5", "%s %s.tar" % (md5Command(), installation_info.install_binary_path))
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- if i.results.rc:
- logger.error("Failed to run command: %s" % cmd.cmdStr)
- logger.error(i.results.stderr)
- return True
- else:
- fields = i.results.stdout.split()
- if len(fields) != md5OutputWords():
- raise Exception("Unexpected output from md5sum: %s" % i.results.stdout)
- installation_info.install_md5 = fields[0].strip()
- break
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- return False
-
-# True is an error
-def copyOverSoftware():
-
- ###################################################################################
- cmdStr = "rm -f %s.tar; rm -f %s.tar.gz" % (installation_info.install_binary_path, installation_info.install_binary_path)
- if (simpleLocalCommand(cmdStr, False)):
- return True
-
- cmdStr = "cd %s; tar cf %s.tar %s" % (installation_info.install_binary_dir_location, installation_info.install_binary_dir_name, installation_info.install_binary_dir_name)
- if (simpleLocalCommand(cmdStr, False)):
- return True
-
- ###################################################################################
- if setMd5Locally():
- return True
-
- ###################################################################################
-
- cmdStr = "gzip %s.tar" % installation_info.install_binary_path
- if (simpleLocalCommand(cmdStr, False)):
- return True
-
- ###################################################################################
- cmdStr = "mkdir -p %s" % installation_info.install_binary_dir_location
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- # a couple paranoid checks
- if installation_info.install_binary_path == "/" or \
- installation_info.install_binary_path == "/usr" or \
- installation_info.install_binary_path == "/usr/bin" or \
- re.search('boot', installation_info.install_binary_path):
- raise Exception("illegal path for installaiton %s" % installation_info.install_binary_path)
-
- cmdStr = "rm -rf %s" % (installation_info.install_binary_path)
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- logger.info("scp software to remote location")
-
- failures = set()
-
- filename = installation_info.install_binary_path + ".tar.gz"
- try:
- for k in uniqueseghosts.keys():
- uh = uniqueseghosts[k]
- cmdStr = "scp %s %s:%s" % (filename, uh.address, installation_info.install_binary_dir_location)
- cmd = Command(uh.address, cmdStr)
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- if i.results.rc:
- logger.error("command failed: '%s': %s" % (i.cmdStr, i.results.stderr.strip()))
- failures.add(i.name)
-
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- if len(failures):
- return True
-
-
- ###################################################################################
- cmdStr = "gzip -f -d %s.tar.gz" % installation_info.install_binary_path
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- logger.info("md5 check on remote location")
- failures.clear()
-
- try:
- for k in uniqueseghosts.keys():
- uh = uniqueseghosts[k]
- cmdStr = "%s %s.tar" % (md5Command(), installation_info.install_binary_path)
- cmd = Command(uh.address, cmdStr, NAKED, uh.address, nakedExecutionInfo=NakedExecutionInfo(passwordMap))
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- address = i.remoteHost
- if i.results.rc:
- logger.error("%s: %s" % (address, i.results.stderr))
- failures.add(address)
- else:
- fields = i.results.stdout[0].split()
- if len(fields) == md5OutputWords():
- md5 = fields[0].strip()
- if md5 != installation_info.install_md5:
- logger.error("on host %s md5sum %s != expected %s" % (address, md5, installation_info.install_md5))
- failures.add(address)
- else:
- logger.error("Unexpected output on host %s from md5sum: %s" % (address, i.results.stdout))
- failures.add(address)
-
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- if len(failures):
- for h in failures:
- logger.error("md5sum check of %s.tar on host %s failed" % (installation_info.install_binary_path, h))
- return True
-
- ###################################################################################
- cmdStr = "cd %s; tar xf %s.tar" % (installation_info.install_binary_dir_location, installation_info.install_binary_dir_name)
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- cmdStr = "rm -f %s.tar" % (installation_info.install_binary_path)
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- if installation_info.install_link_name:
- cmdStr = "cd %s; rm -f %s; ln -fs %s %s" % (installation_info.install_binary_dir_location, installation_info.install_link_name, installation_info.install_binary_dir_name, installation_info.install_link_name)
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- if currentUser == "root":
- cmdStr = "chown -R %s:%s %s/%s" % (options.user, options.group, installation_info.install_binary_dir_location, installation_info.install_link_name)
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- if currentUser == "root":
- cmdStr = "chown -R %s:%s %s" % (options.user, options.group, installation_info.install_binary_path)
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- cmdStr = "rm -f %s.tar.gz" % (installation_info.install_binary_path)
- if (simpleLocalCommand(cmdStr, True)):
- return True
-
- ###################################################################################
- return False
-
-
-# True is an error
-def verifyVersionAtPath(usepath):
-
- cmdStr = ". %s/greenplum_path.sh; %s/bin/gpssh --version" % (usepath, usepath)
- logger.info("remote command: %s" % cmdStr)
-
- try:
- for k in uniqueseghosts.keys():
- uh = uniqueseghosts[k]
- cmd = Command(uh.address, cmdStr, NAKED, uh.address, nakedExecutionInfo=NakedExecutionInfo(passwordMap))
- pool.addCommand(cmd)
- pool.join()
- items = pool.getCompletedItems()
- for i in items:
- if i.results.rc:
- logger.error("error on host with command: %s" % (i.remoteHost, cmdStr))
- return True
- if not i.results.stdout:
- logger.error("could not find version string from host %s with command: %s" % (i.remoteHost, cmdStr))
- return True
- version_string = i.results.stdout[0].strip()
- if version_string != installation_info.install_version_string:
- logger.error("version string on host %s: '%s' does not match expected: '%s'" % (i.remoteHost, version_string, installation_info.install_version_string))
- return True
-
- except Exception, e:
- logger.error("%s" % e.__str__())
- return True
-
- return False
-
-
-# True is an error
-def verifySoftware():
-
- if (getLocalSoftwareVersion()):
- return True
-
- logger.info("version string on master: %s" % installation_info.install_version_string)
-
- if verifyVersionAtPath(gphome):
- return True
-
- if gphome != installation_info.install_binary_path:
- if verifyVersionAtPath(installation_info.install_binary_path):
- return True
-
- return False
-
-# True is an error
-def checkAndFixUserLimits():
-
- if not isLinux:
- return False
-
- cmd = "%s/sbin/gpfixuserlimts -f /etc/security/limits.conf -u %s" % (gphome, options.user)
- if (simpleLocalCommand(cmd, True)):
- return True
-
- cmdStr = ". %s/greenplum_path.sh; %s" % (gphome, cmd)
- if (simpleRemoteCommand(cmdStr, True)):
- return True
-
- return False
-
-
-# True is an error
-def interactiveCommand(cmdStr):
-
- try:
- p = subprocess.Popen(cmdStr, shell=True, executable="/bin/bash")
- sts = os.waitpid(p.pid, 0)[1]
- if sts:
- logger.error("error on cmd: %s" % cmdStr)
- except Exception, e:
- logger.error("Exception running cmd: %s" % cmdStr)
- logger.error(e.__str__())
- return True
-
- return False
-
-# True is an error
-def exchangeKeysUser():
-
- if currentUser == "root":
- return exchangeKeysAsRootForUser()
- else:
- return exchangeKeysAsSelf()
-
-# True is an error
-def exchangeKeysAsSelf():
-
- cmdStr = 'gpssh-exkeys -f %s' % options.file
- return interactiveCommand(cmdStr)
-
-# True is an error
-def exchangeKeysAsRootForUser():
-
- ###################################################################################
- logger.info("exchange ssh keys for user %s" % options.user)
-
- testCmd = 'su %s -c "cat %s &> /dev/null"' % (options.user, options.file)
- try:
- p = subprocess.Popen(testCmd, shell=True, executable="/bin/bash")
- sts = os.waitpid(p.pid, 0)[1]
- if sts:
- logger.error("failed doing a test read of file: %s" % testCmd)
- logger.error("%s is not accessible as user %s" % (options.file, options.user))
- return True
- except Exception, e:
- logger.error("failed doing a test read of file: %s" % testCmd)
- logger.error("%s is not accessible as user %s" % (options.file, options.user))
- logger.error(e.__str__())
- return True
-
- done = False
- badPassword = False
- child = None
- cmdStr = None
-
- try:
- cmdStr = 'su %s -c "gpssh-exkeys -f %s"' % (options.user, options.file)
- child = pexpect.spawn(cmdStr)
-
- index = 0
- while 1:
- index = child.expect(["password", "bad", pexpect.EOF, pexpect.TIMEOUT], timeout=GPSSH_EXKEYS_TIMEOUT)
- if index == 0:
- child.sendline(options.password)
- continue
- if index == 1:
- badPassword = True
- break
- if index == 2:
- done = True
- break
- if index == 3:
- logger.info("Timeout running command: %s" % cmdStr)
- break
-
- except Exception, e:
- logger.info("Exception running cmd: %s" % cmdStr)
- logger.info(e.__str__())
-
- if done:
- child.close()
- if child.exitstatus:
- logger.info("Cmd '%s' failed with error code %s" % (cmdStr, child.exitstatus))
- else:
- return False
-
- # using the supplied password did not work... lets try again in interactive mode
- logger.info("gppsh-exkeys failed running from within pexpect ... now try outside of pexpect")
- return interactiveCommand(cmdStr)
-
-# True is an error
-def exchangeKeysRoot():
- ###################################################################################
- logger.info("exchange ssh keys for user root")
-
- rootPasswords = list(passwordMap.unique_passwords)
-
- done = False
- child = None
- cmdStr = None
- passwordIndex = 0
-
-
- try:
- cmdStr = 'gpssh-exkeys -f %s' % options.file
- child = pexpect.spawn(cmdStr)
-
- index = 0
- while 1:
- index = child.expect(["password", "bad", pexpect.EOF, pexpect.TIMEOUT], timeout=GPSSH_EXKEYS_TIMEOUT)
- if index == 0:
- passwordIndex = 0
- child.sendline(rootPasswords[passwordIndex])
- continue
- if index == 1:
- passwordIndex += 1
- if passwordIndex >= len(rootPasswords):
- raise Exception("could not determine root password on all machines")
- child.sendline(rootPasswords[passwordIndex])
- continue
- if index == 2:
- done = True
- break
- if index == 3:
- logger.error("Timeout running command: %s" % cmdStr)
- break
-
- except Exception, e:
- logger.info("Error running cmd: %s" % cmdStr)
- logger.info(e.__str__())
-
- if done:
- child.close()
- if child.exitstatus:
- logger.info("Cmd '%s' failed with error code %s" % (cmdStr, child.exitstatus))
- else:
- return False
-
- # using the supplied password did not work... lets try again in interactive mode
- logger.info("gppsh-exkeys failed running from within pexpect ... now try outside of pexpect")
- return interactiveCommand(cmdStr)
-
-
-def earlyExit(error = False):
- if error:
- logger.fatal("early exit from gpseginstall")
- if pool:
- try:
- pool.join()
- pool.haltWork()
- pool.joinWorkers()
- except KeyboardInterrupt:
- logger.info("Exiting, please wait ...")
-
- if error:
- sys.exit(1)
- else:
- logger.info("SUCCESS -- Requested commands completed")
- sys.exit(0)
-
-if (parseargs()):
- sys.exit(0)
-
-pool = WorkerPool()
-
-try:
-
- if (readHostList()):
- earlyExit(True)
-
- if (discoverPasswordMap()):
- earlyExit(True)
-
- if (dedupeHosts()):
- earlyExit(True)
-
- if 'u' in commands:
- if (checkUsernames()):
- earlyExit(True)
-
- if (addUserIfRequired()):
- earlyExit(True)
-
- if 'c' in commands:
- if (chownMasterSoftware()):
- earlyExit(True)
-
- if 's' in commands:
- if (copyOverSoftware()):
- earlyExit(True)
-
- if 'p' in commands:
- if (changeUserPassword()):
- earlyExit(True)
-
- if 'E' in commands:
- if (exchangeKeysRoot()):
- earlyExit(True)
-
- if 'e' in commands:
- if (exchangeKeysUser()):
- earlyExit(True)
-
- if 'l' in commands:
- if (checkAndFixUserLimits()):
- earlyExit(True)
-
- if 'v' in commands:
- if (verifySoftware()):
- earlyExit(True)
-
-except KeyboardInterrupt:
- logger.info("Job stopped by user")
- earlyExit(True)
-
-earlyExit(False)
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/lib/gpcreateseg.sh
----------------------------------------------------------------------
diff --git a/tools/bin/lib/gpcreateseg.sh b/tools/bin/lib/gpcreateseg.sh
deleted file mode 100755
index dcc3bdc..0000000
--- a/tools/bin/lib/gpcreateseg.sh
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/bin/bash
-# Filename:- gpcreateseg.sh
-# Version:- $Revision$
-# Updated:- $Date$
-# Status:- Released
-# Author:- G Coombe
-# Contact:- gcoombe@greenplum.com
-# Release date:- Dec 2006
-# Release stat:- Released
-# Copyright (c) Metapa 2005. All Rights Reserved.
-# Copyright (c) 2007 Greenplum Inc
-#******************************************************************************
-# Update History
-#******************************************************************************
-# Ver Date Who Update
-#******************************************************************************
-# Detailed Description
-#******************************************************************************
-#******************************************************************************
-# Prep Code
-
-WORKDIR=`dirname $0`
-
-# Source required functions file, this required for script to run
-# exit if cannot locate this file. Change location of FUNCTIONS variable
-# as required.
-FUNCTIONS=$WORKDIR/gp_bash_functions.sh
-if [ -f $FUNCTIONS ]; then
- . $FUNCTIONS
-else
- echo "[FATAL]:-Cannot source $FUNCTIONS file Script Exits!"
- exit 2
-fi
-
-#******************************************************************************
-# Script Specific Variables
-#******************************************************************************
-# Log file that will record script actions
-CUR_DATE=`$DATE +%Y%m%d`
-TIME=`$DATE +%H%M%S`
-PROG_NAME=`$BASENAME $0`
-# Level of script feedback 0=small 1=verbose
-unset VERBOSE
-unset PG_CONF_ADD_FILE
-# MPP database specific parameters
-GP_USER=$USER_NAME
-GP_TBL=gp_id
-# System table names
-GP_CONFIGURATION_TBL=gp_segment_configuration
-EXIT_STATUS=0
-# ED_PG_CONF search text values
-PORT_TXT="#port"
-LOG_STATEMENT_TXT="#log_statement ="
-LISTEN_ADR_TXT="listen_addresses"
-CHKPOINT_SEG_TXT="checkpoint_segments"
-TMP_PG_HBA=/tmp/pg_hba_conf_master.$$
-
-#******************************************************************************
-# Functions
-#******************************************************************************
-USAGE () {
- $ECHO
- $ECHO " `basename $0`"
- $ECHO
- $ECHO " Script called by gpinitsystem, this should not"
- $ECHO " be run directly"
- exit $EXIT_STATUS
-}
-
-CHK_CALL () {
- FILE_PREFIX=`$ECHO $PARALLEL_STATUS_FILE|$CUT -d"." -f1`
- if [ ! -f ${FILE_PREFIX}.$PARENT_PID ];then
- $ECHO "[FATAL]:-Not called from correct parent program"
- exit 2
- fi
-}
-
-SET_VAR () {
- #
- # MPP-13617: If segment contains a ~, we assume ~ is the field delimiter.
- # Otherwise we assume : is the delimiter. This allows us to easily
- # handle IPv6 addresses which may contain a : by using a ~ as a delimiter.
- #
- I=$1
- case $I in
- *~*)
- S="~"
- ;;
- *)
- S=":"
- ;;
- esac
- GP_HOSTADDRESS=`$ECHO $I|$CUT -d$S -f1`
- GP_PORT=`$ECHO $I|$CUT -d$S -f2`
- GP_DIR=`$ECHO $I|$CUT -d$S -f3`
- GP_DBID=`$ECHO $I|$CUT -d$S -f4`
- GP_CONTENT=`$ECHO $I|$CUT -d$S -f5`
-}
-
-PARA_EXIT () {
- if [ $1 -ne 0 ];then
- $ECHO "FAILED:$SEGMENT_LINE" >> $PARALLEL_STATUS_FILE
- LOG_MSG "[FATAL][$INST_COUNT]:-Failed $2"
- exit 2
- else
- LOG_MSG "[INFO][$INST_COUNT]:-Completed $2"
- fi
-}
-
-PROCESS_QE () {
-
- LOG_MSG "[INFO][$INST_COUNT]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO][$INST_COUNT]:-Processing segment $GP_HOSTADDRESS"
-
- MIRROR_ONLY_INITDB_OPTION=
- if [ x"" != x"$COPY_FROM_PRIMARY_HOSTADDRESS" ]; then
- MIRROR_ONLY_INITDB_OPTION=-m
- fi
-
- # build initdb command, capturing output in ${GP_DIR}.initdb
- cmd="$EXPORT_LIB_PATH;$INITDB"
- cmd="$cmd $MIRROR_ONLY_INITDB_OPTION"
- cmd="$cmd -E $ENCODING"
- cmd="$cmd -D $GP_DIR"
- cmd="$cmd --locale=$LOCALE_SETTING"
- cmd="$cmd $LC_ALL_SETTINGS"
- cmd="$cmd --max_connections=$QE_MAX_CONNECT"
- cmd="$cmd --shared_buffers=$QE_SHARED_BUFFERS"
- cmd="$cmd --backend_output=$GP_DIR.initdb"
-
- $TRUSTED_SHELL ${GP_HOSTADDRESS} $cmd >> $LOG_FILE 2>&1
- RETVAL=$?
-
- # if there was an error, copy ${GP_DIR}.initdb to the log before cleaning it up
- if [ $RETVAL -ne 0 ]; then
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "cat $GP_DIR.initdb" >> $LOG_FILE 2>&1
- fi
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "rm -f $GP_DIR.initdb" >> $LOG_FILE 2>&1
- BACKOUT_COMMAND "$TRUSTED_SHELL ${GP_HOSTADDRESS} \"$RM -rf $GP_DIR > /dev/null 2>&1\""
- BACKOUT_COMMAND "$ECHO \"removing directory $GP_DIR on $GP_HOSTADDRESS\""
- PARA_EXIT $RETVAL "to start segment instance database $GP_HOSTADDRESS $GP_DIR"
-
- # on mirror, copy data from primary
- if [ x"" != x"$COPY_FROM_PRIMARY_HOSTADDRESS" ]; then
- LOG_MSG "[INFO]:-Copying data for mirror on ${GP_HOSTADDRESS} using remote copy from primary ${COPY_FROM_PRIMARY_HOSTADDRESS} ..." 1
- RUN_COMMAND_REMOTE ${COPY_FROM_PRIMARY_HOSTADDRESS} "${EXPORT_GPHOME}; . ${GPHOME}/greenplum_path.sh; ${GPHOME}/bin/lib/pysync.py -x pg_log -x postgresql.conf -x postmaster.pid ${COPY_FROM_PRIMARY_DIR} \[${GP_HOSTADDRESS}\]:${GP_DIR}"
- RETVAL=$?
- PARA_EXIT $RETVAL "remote copy of segment data directory from ${COPY_FROM_PRIMARY_HOSTADDRESS} to ${GP_HOSTADDRESS}"
- fi
-
- # Configure postgresql.conf
- LOG_MSG "[INFO][$INST_COUNT]:-Configuring segment $PG_CONF"
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$ECHO \"#MPP Specific parameters\" >> ${GP_DIR}/$PG_CONF"
- RETVAL=$?
- PARA_EXIT $RETVAL "Update ${GP_DIR}/$PG_CONF file"
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$ECHO \"#----------------------\" >> ${GP_DIR}/$PG_CONF"
- RETVAL=$?
- PARA_EXIT $RETVAL "Update ${GP_DIR}/$PG_CONF file"
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$PORT_TXT" port=$GP_PORT 0 $GP_HOSTADDRESS
- PARA_EXIT $RETVAL "Update port number to $GP_PORT"
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$LISTEN_ADR_TXT" listen_addresses=\'*\' 0 $GP_HOSTADDRESS
- PARA_EXIT $RETVAL "Update listen address"
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$CHKPOINT_SEG_TXT" checkpoint_segments=$CHECK_POINT_SEGMENTS 0 $GP_HOSTADDRESS
- PARA_EXIT $RETVAL "Update checkpoint segments"
-
- if [ x"" != x"$PG_CONF_ADD_FILE" ]; then
- LOG_MSG "[INFO][$INST_COUNT]:-Processing additional configuration parameters"
- for NEW_PARAM in `$CAT $PG_CONF_ADD_FILE|$TR -s ' '|$TR -d ' '|$GREP -v "^#"`
- do
- LOG_MSG "[INFO][$INST_COUNT]:-Adding config $NEW_PARAM to segment"
- SEARCH_TXT=`$ECHO $NEW_PARAM |$CUT -d"=" -f1`
- ED_PG_CONF ${GP_DIR}/$PG_CONF $SEARCH_TXT $NEW_PARAM 0 $GP_HOSTADDRESS
- PARA_EXIT $RETVAL "Update $PG_CONF $SEARCH_TXT $NEW_PARAM"
- done
- fi
-
- # Configuring PG_HBA -- on mirror, only need to add local addresses (skip the other addresses)
- LOG_MSG "[INFO][$INST_COUNT]:-Configuring segment $PG_HBA"
- if [ x"" = x"$COPY_FROM_PRIMARY_HOSTADDRESS" ]; then
- for MASTER_IP in "${MASTER_IP_ADDRESS[@]}"
- do
- # MPP-15889
- CIDR_MASTER_IP=$(GET_CIDRADDR $MASTER_IP)
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$ECHO host all all ${CIDR_MASTER_IP} trust >> ${GP_DIR}/$PG_HBA"
- PARA_EXIT $? "Update $PG_HBA for master IP address ${CIDR_MASTER_IP}"
- done
- if [ x"" != x"$STANDBY_HOSTNAME" ];then
- LOG_MSG "[INFO][$INST_COUNT]:-Processing Standby master IP address for segment instances"
- for STANDBY_IP in "${STANDBY_IP_ADDRESS[@]}"
- do
- # MPP-15889
- CIDR_STANDBY_IP=$(GET_CIDRADDR $STANDBY_IP)
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$ECHO host all all ${CIDR_STANDBY_IP} trust >> ${GP_DIR}/$PG_HBA"
- PARA_EXIT $? "Update $PG_HBA for master standby address ${CIDR_STANDBY_IP}"
- done
- fi
- fi
-
- # Add all local IPV4 addresses
- SEGMENT_IPV4_LOCAL_ADDRESS_ALL=(`$TRUSTED_SHELL $GP_HOSTADDRESS "$IFCONFIG $IFCONFIG_TXT |$GREP \"inet \"|$GREP -v \"127.0.0\"|$AWK '{print \\$2}'|$CUT -d: -f2"`)
- for ADDR in "${SEGMENT_IPV4_LOCAL_ADDRESS_ALL[@]}"
- do
- # MPP-15889
- CIDR_ADDR=$(GET_CIDRADDR $ADDR)
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$ECHO host all $USER_NAME $CIDR_ADDR trust >> ${GP_DIR}/$PG_HBA"
- done
-
- # Add all local IPV6 addresses
- SEGMENT_IPV6_LOCAL_ADDRESS_ALL=(`$TRUSTED_SHELL $GP_HOSTADDRESS "$IPV6_ADDR_LIST_CMD | $GREP inet6 | $AWK '{print \\$2}' |$CUT -d'/' -f1"`)
- for ADDR in "${SEGMENT_IPV6_LOCAL_ADDRESS_ALL[@]}"
- do
- # MPP-15889
- CIDR_ADDR=$(GET_CIDRADDR $ADDR)
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$ECHO host all $USER_NAME $CIDR_ADDR trust >> ${GP_DIR}/$PG_HBA"
- done
-
- # Create temp directories file
- LOG_MSG "[INFO][$INST_COUNT]:-temporary directory: $TEMP_DIRECTORY_LIST"
- if [ "${#TEMP_DIRECTORY_LIST[*]}" -ne 0 ]; then
- BACKOUT_COMMAND "$TRUSTED_SHELL ${GP_HOSTADDRESS} \"$RM -rf ${GP_DIR}/${GP_TEMP_DIRECTORIES_FILE} > /dev/null 2>&1\""
- fi
- for DIR in "${TEMP_DIRECTORY_LIST[@]}"
- do
- tmp_seg_dir="${DIR}/${SEG_PREFIX}${GP_CONTENT}"
- LOG_MSG "[INFO]:-create temporary ${tmp_seg_dir}"
- BACKOUT_COMMAND "$TRUSTED_SHELL ${GP_HOSTADDRESS} \"$RM -rf ${tmp_seg_dir} > /dev/null 2>&1\""
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$MKDIR ${tmp_seg_dir}"
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- # should not happen, we check the permission in gpinitsystem.
- LOG_MSG "[FATAL]:-temp directory created failed: ${tmp_seg_dir}"
- exit 2
- fi
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$ECHO ${tmp_seg_dir} >> ${GP_DIR}/${GP_TEMP_DIRECTORIES_FILE}"
- done
-
- if [ x"" = x"$COPY_FROM_PRIMARY_HOSTADDRESS" ]; then
- # Primary: start the segment to fill in configuration
- START_QE
- UPDATE_MPP $GP_PORT "$ARRAY_NAME" $TOTAL_SEG $GP_DBID $GP_CONTENT 1 $GP_HOSTADDRESS $GP_DIR
- STOP_QE
- fi
-
- LOG_MSG "[INFO]:-[$INST_COUNT]-End Function $FUNCNAME"
-}
-
-STOP_QE() {
- # we don't add backout commands here. We could get double-stop calls since the same QE is sometimes started, stopped, and restarted. But hopefully that's okay
-
- LOG_MSG "[INFO]:-Start Function $FUNCNAME" 1
- LOG_MSG "[INFO]:-Stopping instance on segment ${GP_HOSTADDRESS}:${GP_PORT}" 1
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$EXPORT_LIB_PATH;export PGPORT=${GP_PORT}; $PG_CTL -w -l $GP_DIR/pg_log/startup.log -D $GP_DIR -o \"-i -p ${GP_PORT}\" stop" >> $LOG_FILE 2>&1
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-START_QE() {
- LOG_MSG "[INFO][$INST_COUNT]:-Starting Functioning instance on segment ${GP_HOSTADDRESS}"
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$EXPORT_LIB_PATH;export PGPORT=${GP_PORT}; $PG_CTL -w -l $GP_DIR/pg_log/startup.log -D $GP_DIR -o \"-i -p ${GP_PORT} -M mirrorless -b ${GP_DBID} -C ${GP_CONTENT} -z 0\" start" >> $LOG_FILE 2>&1
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- BACKOUT_COMMAND "$TRUSTED_SHELL $GP_HOSTADDRESS \"${EXPORT_LIB_PATH};export PGPORT=${GP_PORT}; $PG_CTL -w -D $GP_DIR -o \"-i -p ${GP_PORT}\" -m immediate stop\""
- BACKOUT_COMMAND "$ECHO \"Stopping segment instance on $GP_HOSTADDRESS\""
- $TRUSTED_SHELL ${GP_HOSTADDRESS} "$CAT ${GP_DIR}/pg_log/startup.log "|$TEE -a $LOG_FILE
- PARA_EXIT $RETVAL "Start segment instance database"
- fi
- BACKOUT_COMMAND "$TRUSTED_SHELL $GP_HOSTADDRESS \"${EXPORT_LIB_PATH};export PGPORT=${GP_PORT}; $PG_CTL -w -D $GP_DIR -o \"-i -p ${GP_PORT}\" -m immediate stop\""
- BACKOUT_COMMAND "$ECHO \"Stopping segment instance on $GP_HOSTADDRESS\""
- LOG_MSG "[INFO][$INST_COUNT]:-Successfully started segment instance on $GP_HOSTADDRESS"
-}
-
-#******************************************************************************
-# Main Section
-#******************************************************************************
-trap '$ECHO "KILLED:$SEGMENT_LINE" >> $PARALLEL_STATUS_FILE;ERROR_EXIT "[FATAL]:-[$INST_COUNT]-Recieved INT or TERM signal" 2' INT TERM
-while getopts ":v'?'aiqe:c:l:p:m:h:on:s:" opt
-do
- case $opt in
- v ) VERSION_INFO ;;
- '?' ) USAGE ;;
- q ) unset VERBOSE ;;
- p ) PG_CONF_ADD_FILE=$OPTARG
- shift
- shift ;;
- * ) USAGE
- esac
-done
-
-#Now process supplied call parameters
-PARENT_PID=$1;shift #PID of calling gpinitsystem program
-CHK_CALL
-
-TYPE=$1;shift
-case $TYPE in
- 1 )
- SEGMENT_LINE=$1;shift #String used to build segment instance
- COPY_FROM_PRIMARY_SEGMENT_LINE=$1;shift # String used to build primary segment instance from which to copy data
- if [ x"IS_PRIMARY" != x"$COPY_FROM_PRIMARY_SEGMENT_LINE" ]; then
- SET_VAR $COPY_FROM_PRIMARY_SEGMENT_LINE
- COPY_FROM_PRIMARY_HOSTADDRESS=$GP_HOSTADDRESS
- COPY_FROM_PRIMARY_DIR=$GP_DIR
- COPY_FROM_PRIMARY_PORT=$GP_PORT
- COPY_FROM_PRIMARY_CONTENT=$GP_CONTENT
- fi
- SET_VAR $SEGMENT_LINE
-
- if [ x"IS_PRIMARY" != x"$COPY_FROM_PRIMARY_SEGMENT_LINE" ]; then
- if [ x"$GP_CONTENT" != x"$COPY_FROM_PRIMARY_CONTENT" ]; then
- $ECHO "[FATAL]:-mismatch between content id and primary content id"
- exit 2
- fi
- fi
- TEMP_DIRECTORY_COMPACT_LIST=$1;shift
- TEMP_DIRECTORY_LIST=(`$ECHO ${TEMP_DIRECTORY_COMPACT_LIST[@]} | $TR ',' ' '`)
- SEG_PREFIX=$1;shift
- IS_FILEREP_MIRRORED_OPTION=$1;shift # yes or no, should we tell initdb to create persistent values
- INST_COUNT=$1;shift #Unique number for this parallel script, starts at 0
- BACKOUT_FILE=/tmp/gpsegcreate.sh_backout.$$
- LOG_FILE=$1;shift #Central logging file
- LOG_MSG "[INFO][$INST_COUNT]:-Start Main"
- LOG_MSG "[INFO][$INST_COUNT]:-Command line options passed to utility = $*"
- TMP_MASTER_IP_ADDRESS=$1;shift #List of IP addresses for the master instance
- MASTER_IP_ADDRESS=(`$ECHO $TMP_MASTER_IP_ADDRESS|$TR '~' ' '`)
- TMP_STANDBY_IP_ADDRESS=$1;shift #List of IP addresses for standby master
- STANDBY_IP_ADDRESS=(`$ECHO $TMP_STANDBY_IP_ADDRESS|$TR '~' ' '`)
- PROCESS_QE
- $ECHO "COMPLETED:$SEGMENT_LINE" >> $PARALLEL_STATUS_FILE
- ;;
-esac
-
-LOG_MSG "[INFO][$INST_COUNT]:-End Main"
-exit $EXIT_STATUS
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/lib/gpinitsegment
----------------------------------------------------------------------
diff --git a/tools/bin/lib/gpinitsegment b/tools/bin/lib/gpinitsegment
deleted file mode 100755
index e79c158..0000000
--- a/tools/bin/lib/gpinitsegment
+++ /dev/null
@@ -1,542 +0,0 @@
-#!/usr/bin/env python
-# ==============================================================================
-'''
-Usage: gpinitsegment [options]
-
-Options:
- -l, --logdir
- -c, --content
- -h, --host
- -i, --dbid
- -n, --numseg
- -p, --port
- -A, --array-name
- -C, --copy
- -D, --datadir
- -E, --client_encoding
- -G, --gphome
- -K, --checkpoint_segments
- -L, --locale
- -M, --max_connections
-'''
-
-# TODO MIRRORING: NEED TO TAKE in replicationPort
-
-import sys, os, re, optparse
-import time, datetime
-import signal, subprocess, tempfile
-
-EXECNAME = os.path.split(__file__)[1]
-os_type = os.uname()[0].lower()
-localhost = os.uname()[1]
-
-# ==============================================================================
-class SegmentError(Exception): pass
-
-
-# ==============================================================================
-def ParseInput():
- '''
- Parse input
- '''
- try:
- parser = optparse.OptionParser(usage=__doc__,
- add_help_option=False)
- parser.add_option('-?', '--help', action='store_true',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-l', '--logdir',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-h', '--host',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-A', '--array-name', dest='cluster',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-p', '--port', type='int',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-c', '--content', type='int',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-i', '--dbid', type='int',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-n', '--numseg', type='int',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-D', '--datadir',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-C', '--copy',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-G', '--gphome',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-K', '--checkpoint_segments',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-E', '--client_encoding',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-L', '--locale',
- help=optparse.SUPPRESS_HELP)
- parser.add_option('-M', '--max_connections',
- help=optparse.SUPPRESS_HELP)
- (options, args) = parser.parse_args()
-
- if options.help:
- print __doc__
- sys.exit(0)
-
- elif len(args) > 0:
- print __doc__
- sys.exit(1)
-
- except Exception, e:
- print __doc__
- print 'Error parsing input: '
- raise e
-
- return options
-
-def RunCmd(cmd, env):
- try:
- pipe = subprocess.Popen(cmd, shell=True, env=env,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- except OSError, e:
- raise SegmentError(cmd.split(' ')[0] + ' : ' + str(e))
-
- try:
- result = pipe.communicate();
- except OSError, e:
- raise SegmentError(cmd + ' : ' + str(e))
-
- if (pipe.returncode):
- print result[0].strip()
- print result[1].strip()
- raise SegmentError(cmd + ' : returned ' + str(pipe.returncode))
-
- return result[0].strip()
-
-
-# ==============================================================================
-class GPInitSegment():
-
- #----------------------------------------------------------------------
- def __init__(self, options=None):
- '''
- Sets the default values for a master segment
- '''
-
- home = os.environ.get('HOME')
-
- self.logfile = None
-
- def notNone(a,b):
- if a != None: return a
- return b
-
- if home == None:
- self.logdir = options.logdir
- else:
- self.logdir = notNone(options.logdir,
- os.path.join(home, 'gpAdminLogs'))
- if not os.path.exists(self.logdir):
- self.logdir = None
-
- self.host = notNone(options.host, localhost)
- self.cluster = notNone(options.cluster, 'Greenplum')
- self.port = notNone(options.port, '5432')
- self.content = notNone(options.content, '-1')
- self.dbid = notNone(options.dbid, '1')
- self.numseg = notNone(options.numseg, '-1')
- self.dir = notNone(options.datadir,
- os.environ.get('MASTER_DATA_DIRECTORY'))
- self.gphome = options.gphome or os.environ.get('GPHOME')
- self.source = options.copy
- self.encoding = options.client_encoding
- self.checkseg = options.checkpoint_segments
- self.locale = options.locale
- self.maxconn = options.max_connections
-
- self.user = os.environ.get('USER') or os.environ.get('LOGNAME')
-
- if not self.gphome:
- print "Could not determine GPHOME"
- sys.exit(0)
-
- if int(self.numseg) <= 0:
- print "Total number of segments must be > 0"
- sys.exit(1)
-
- if self.dir[0] != '/':
- self.dir = os.path.join(os.getcwd(), self.dir)
-
- if self.logdir:
- today = datetime.date.today().strftime('%Y%m%d')
- logname = os.path.join(self.logdir, '%s_%s.log' % (EXECNAME, today))
- self.logfile = open(logname, 'a')
-
-
- self.env = {}
- self.env['HOME'] = os.environ.get('HOME')
- self.env['USER'] = self.user
- self.env['LOGNAME'] = self.user
- self.env['GPHOME'] = self.gphome
-
- # Need to establish an environment to run commands in
- # + Add $GPHOME/bin to the path for this environment
- # + Add /sbin to the path for ifconfig
- path = os.environ.get('PATH') or ''
- path = '%s/bin:%s/ext/python/bin:/sbin:%s' % (self.gphome, self.gphome, path)
- self.env['PATH'] = path
-
- path = os.environ.get('LD_LIBRARY_PATH') or ''
- path = '%s/lib:%s/ext/python/lib:%s' % (self.gphome, self.gphome, path)
- self.env['LD_LIBRARY_PATH'] = path
-
- path = os.environ.get('DYLD_LIBRARY_PATH') or ''
- path = '%s/lib:%s/ext/python/lib:%s' % (self.gphome, self.gphome, path)
- self.env['DYLD_LIBRARY_PATH'] = path
-
- #----------------------------------------------------------------------
- def CreateSegment(self):
- '''
- Creates the new segment via one of three main methods:
- 1) Executes gpinitsegment on remote host
- 2) Runs initdb in the directory
- 3) Copies the data directory from to source location
- '''
- if self.host != localhost:
- self.ExecuteRemote()
- elif self.source:
- self.CopySeg()
- else:
- self.InitSeg()
-
- #----------------------------------------------------------------------
- def ExecuteRemote(self):
- '''
- Remote execution strategy is simply to use ssh to call
- gpinitsegment on the remote host.
- '''
-
- self.Log('INFO', 'Executing initseg remotely on %s' % self.host)
-
- ENV = '%s/bin/lib/gpenv.sh' % self.gphome
- cmd = 'ssh %s %s %s' % (self.host, ENV, __file__)
- cmd += ' -G %s' % self.gphome
- cmd += ' -A \'\\"%s\\"\'' % self.cluster
- cmd += ' -p %s' % self.port
- cmd += ' -c %s' % self.content
- cmd += ' -i %s' % self.dbid
- cmd += ' -n %s' % self.numseg
- cmd += ' -D %s' % self.dir
-
- # If using "--copy" then the current hostname will be prefixed
- # to the copy source if a hostname is not already specified
- if self.source:
-
- source_re = re.compile('(?:([^:]+)\:(\d*))?(.*)')
- m = source_re.match(self.source)
- if not m:
- raise SegmentError("Invalid source description: '%s'" % self.source)
- s_host = m.group(1)
- s_port = m.group(2)
- s_dir = m.group(3)
-
- # make s_dir into a full path
- if s_dir[0] != '/':
- s_dir = os.path.join(os.getcwd(), s_dir)
-
- if s_host == None:
- s_host = localhost
-
- cmd += ' -C %s:%s' % (s_host, s_dir)
-
- result = RunCmd(cmd, self.env)
- if (self.logfile):
- self.logfile.write(result + "\n")
- sys.stdout.write(result + "\n")
-
-
- #----------------------------------------------------------------------
- def Log(self, type, msg):
- '''
- Writes a message to stderr and the logfile
- '''
- prefix = ':'.join(
- [time.strftime('%Y%m%d:%H:%M:%S', time.localtime()), EXECNAME,
- self.host, self.user, '[%s]' % type, '-'])
-
- for m in msg.splitlines():
- if m.strip() == '':
- continue
-
- if (self.logfile):
- self.logfile.write('%s%s\n' % (prefix, m))
- self.logfile.flush()
-
- sys.stdout.write('%s%s\n' % (prefix, m))
-
- #----------------------------------------------------------------------
- def MakeDirectory(self):
- '''
- Creates the directory for the segment
- '''
- self.Log('INFO', 'Creating data directory: %s' % self.dir)
-
- if os.path.exists(self.dir):
- raise SegmentError("'%s' already exists" % self.dir)
-
- (path, dir) = os.path.split(self.dir)
-
- if not os.path.exists(path):
- raise SegmentError("'%s' does not exist" % path)
-
- if not os.access(path, os.W_OK):
- raise SegmentError("'%s' no write permission" % path)
-
- try:
- os.mkdir(self.dir)
- RunCmd('chmod 700 ' + self.dir, self.env)
- except Exception, e:
- raise SegmentError(str(e))
-
- #----------------------------------------------------------------------
- def SetPort(self):
- '''
- Modifies postgresql.conf file to have the correct port
- '''
-
- self.Log('INFO', 'Setting port')
-
- if not os.path.exists(self.dir):
- raise SegmentError("'%s' does not exist" % self.dir)
- pidfile = os.path.join(self.dir, 'postmaster.pid')
- if os.path.exists(pidfile):
- raise SegmentError("Can not adjust port when segment is running")
-
- conf = os.path.join(self.dir, 'postgresql.conf')
- if not os.path.exists(conf):
- raise SegmentError("'%s' does not exist" % conf)
-
- # Rewrite the postgresql.conf file
- port_re = re.compile('#?port\s*=\s*\d+.*')
- chkpnt_re = re.compile('#?checkpoint_segments\s*=\s*\d+.*')
- encode_re = re.compile('#?client_encoding\s*=\s*\w+.*')
- old = open(conf, 'r')
- new = open(conf+'.new', 'w')
- for line in old:
- if port_re.match(line):
- new.write('port = %s # sets the database listener port\n'
- % self.port)
- elif (self.checkseg and chkpnt_re.match(line)):
- new.write('checkpoint_segments = %s # in logfile segments, min 1, 16MB each\n'
- % self.checkseg)
- elif (self.encoding and encode_re.match(line)):
- new.write('client_encoding = %s # default database encoding\n'
- % self.encoding)
- else:
- new.write(line)
-
- # Replace the old file with the new one
- os.rename(conf+'.new', conf)
-
- def SetHba(self):
- '''
- Modifies pg_hba.conf file to allow access
- '''
-
- self.Log('INFO', 'Configuring pg_hba.conf')
-
- # Customize for OS
- if os_type == 'sunos':
- cmd = 'ifconfig -a inet'
- else:
- cmd = 'ifconfig'
-
- # open pg_hba.conf for writing
- pg_hba = open(os.path.join(self.dir, 'pg_hba.conf'), 'w')
-
- # Add an entry for local connections by the admin user
- if os_type == 'sunos':
- pg_hba.write('local all all trust\n')
- else:
- pg_hba.write('local all %s ident\n' % self.user)
-
- # Find all known ip addresses that this host identifies with
- ifconf = RunCmd(cmd, self.env)
- ip_re = re.compile('inet (?:addr:)?(\d+\.\d+\.\d+\.\d+)')
- for line in ifconf.split('\n'):
- m = ip_re.search(line)
- if m:
- ip = m.group(1)
- cidr_suffix = '/128' if ':' in ip else '/32' # MPP-15889
- pg_hba.write('host all all %s%s trust\n' % (ip, cidr_suffix))
-
- pg_hba.close()
-
-
- #----------------------------------------------------------------------
- def SetIdentity(self):
- '''
- Modifies gp_id for the segment in both postgres and template1 dbs
- '''
-
- ident = "('%s', %s, %s, %s)" % \
- (self.cluster, self.numseg, self.dbid, self.content)
- self.Log('INFO', 'Setting identity %s' % ident)
-
- options = "PGOPTIONS='-c gp_session_role=utility'"
- psql = options + ' psql -p %s' % self.port
-
- def runSQL(sql):
- r = subprocess.call(psql + ' -c "%s" template1' % sql,
- shell=True,
- stdout=self.logfile,
- stderr=self.logfile)
- if r != 0:
- raise SegmentError('Error Executing SQL (return code %s)' % str(r))
-
- runSQL("delete from gp_id")
- runSQL("insert into gp_id values %s" % ident)
- runSQL("vacuum freeze gp_id")
-
-
- #----------------------------------------------------------------------
- def Startup(self):
- '''
- Uses pg_ctl to startup the database:
- + pg_ctl will wait for the startup to complete
- + pg_ctl may give up after about a minute if the startup fails
- '''
-
- self.Log('INFO', "Starting Greenplum (port=%s, dir=%s)"
- % (self.port, self.dir))
-
- cmd = 'pg_ctl start -w -D %s -l %s.log' % (self.dir, self.dir)
- cmd += ' -o "-i -p %s -c gp_role=utility"' % str(self.port)
- self.Log('INFO', cmd)
- retcode = subprocess.call(cmd, shell=True, env=self.env)
- if retcode != 0:
- raise SegmentError('Segment failed to startup, pg_ctl returned %s'
- % str(retcode))
-
-
- #----------------------------------------------------------------------
- def Shutdown(self):
- '''
- Checks for a postmaster.pid file.
- + If found it calls pg_ctl to stop the database.
- + pg_ctl will wait until the database is stopped
- + pg_ctl may give up after about a minute if the shutdown fails
- '''
-
- if os.path.exists(os.path.join(self.dir, 'postmaster.pid')):
- self.Log('INFO', "Stopping Greenplum (port=%s, dir=%s)"
- % (self.port, self.dir))
-
- cmd = 'pg_ctl stop -D %s -m fast' % self.dir
- self.Log('INFO', cmd)
- retcode = subprocess.call(cmd, shell=True, env=self.env)
- if retcode != 0:
- raise SegmentError('Segment failed to stop, pg_ctl returned %s'
- % str(retcode))
-
-
- #----------------------------------------------------------------------
- def InitSeg(self):
- '''
-
- '''
- self.MakeDirectory()
-
- self.Log('INFO', 'Running initdb')
-
- try:
- cmd = 'initdb -D ' + self.dir
- if self.encoding:
- cmd += ' -E ' + self.encoding
- if self.locale:
- cmd += ' --locale=' + self.locale
- if self.maxconn:
- cmd += ' --max_connections=' + self.maxconn
-
-
- self.Log('INFO', cmd)
-
- subprocess.call(cmd, shell=True,
- stdout=self.logfile,
- stderr=self.logfile)
- except BaseException, e:
- try:
- os.removedirs(self.dir)
- except: pass
- raise SegmentError(str(e))
-
- self.SetPort()
- self.SetHba()
- self.Startup()
- self.SetIdentity()
-
- #----------------------------------------------------------------------
- def CopySeg(self):
- '''
- '''
- source_re = re.compile('(?:([^:]+)\:(\d*))?(.*)')
- m = source_re.match(self.source)
- if not m:
- raise SegmentError("Invalid source description: '%s'" % self.source)
- s_host = m.group(1)
- s_port = m.group(2)
- s_dir = m.group(3)
-
- # make s_dir into a full path
- if s_dir[0] != '/':
- s_dir = os.path.join(os.getcwd(), s_dir)
-
- # Determine what that host thinks its hostname is
- if not s_host:
- s_host = localhost
-
- SSH = 'ssh %s' % s_host
- ENV = '%s/bin/lib/gpenv.sh' % self.gphome
-
- HOSTNAME = RunCmd('which hostname', self.env)
- TAR = RunCmd('which tar', self.env)
- PG_CTL = RunCmd('which pg_controldata', self.env)
- PG_CTL = '%s %s' % (ENV, PG_CTL)
-
- # Validate that it's a real data-directory
- try:
- ctl = RunCmd('%s %s %s' % (SSH, PG_CTL, s_dir), self.env)
- except:
- raise SegmentError("Source Directory '%s' invalid" % s_dir)
-
- # Everything looks good, get ready to rumble
- self.MakeDirectory()
-
- self.Log('INFO', 'Copying data directory')
-
- cmd = "'cd %s && %s -cf - .' | (cd %s && %s -xBf - .)" \
- % (s_dir, TAR, self.dir, TAR)
-
- RunCmd("%s %s" % (SSH, cmd), self.env)
- self.SetPort()
- self.Startup()
- self.SetIdentity()
-
-
-# ==============================================================================
-if __name__ == "__main__":
- try:
- options = ParseInput()
-
- s = GPInitSegment(options)
- try:
- s.CreateSegment()
- except BaseException, e:
- s.Log('FATAL', str(e))
- raise Exception('')
- finally:
- s.Shutdown()
-
- sys.exit(0)
-
- except Exception, e:
- print str(e)
- sys.exit(1)
-
-
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/lib/gpseginitsb.sh
----------------------------------------------------------------------
diff --git a/tools/bin/lib/gpseginitsb.sh b/tools/bin/lib/gpseginitsb.sh
deleted file mode 100755
index 7484a9d..0000000
--- a/tools/bin/lib/gpseginitsb.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/bin/bash
-# Filename:- gpseginitsb.sh
-# Version:- $Revision$
-# Updated:- $Date$
-# Status:- Released
-# Author:- G Coombe
-# Contact:- gcoombe@greenplum.com
-# Release date:- Oct 2007
-# Release stat:- Released
-# Copyright (c) Metapa 2005. All Rights Reserved.
-# Copyright (c) 2007 Greenplum Inc
-#******************************************************************************
-# Update History
-#******************************************************************************
-# Ver Date Who Update
-#******************************************************************************
-# Detailed Description
-#******************************************************************************
-#******************************************************************************
-# Prep Code
-
-WORKDIR=`dirname $0`
-
-# Source required functions file, this required for script to run
-# exit if cannot locate this file. Change location of FUNCTIONS variable
-# as required.
-FUNCTIONS=$WORKDIR/gp_bash_functions.sh
-if [ -f $FUNCTIONS ]; then
- . $FUNCTIONS
-else
- echo "[FATAL]:-Cannot source $FUNCTIONS file Script Exits!"
- exit 2
-fi
-#******************************************************************************
-# Script Specific Variables
-#******************************************************************************
-# Log file that will record script actions
-CUR_DATE=`$DATE +%Y%m%d`
-TIME=`$DATE +%H%M%S`
-PROG_NAME=`$BASENAME $0`
-# Level of script feedback 0=small 1=verbose
-unset VERBOSE
-GP_USER=$USER_NAME
-EXIT_STATUS=0
-#******************************************************************************
-# Functions
-#******************************************************************************
-USAGE () {
- $ECHO
- $ECHO " `basename $0`"
- $ECHO
- $ECHO " Script called by gpinitstandby, this should not be run directly"
- exit $EXIT_STATUS
-}
-
-CHK_CALL () {
- FILE_PREFIX=`$ECHO $PARALLEL_STATUS_FILE|$CUT -d"." -f1`
- if [ ! -f ${FILE_PREFIX}.$PARENT_PID ];then
- $ECHO "[FATAL]:-Not called from from correct parent program"
- exit 2
- fi
-}
-
-UPDATE_PGHBA () {
- LOG_MSG "[INFO][$INST_COUNT]:-Start Function $FUNCNAME"
- STANDBY_IP_ADDRESS=(`$ECHO $IP_ADDRESS_LIST|$TR '~' ' '`)
- for STANDBY_IP in "${STANDBY_IP_ADDRESS[@]}"
- do
- # MPP-15889
- CIDR_STANDBY_IP=$(GET_CIDRADDR $STANDBY_IP)
- CHK_COUNT=`$TRUSTED_SHELL $QE_NAME "$GREP -c \"${CIDR_STANDBY_IP}\" ${QE_BASE_DIR}/$PG_HBA"`
- if [ $CHK_COUNT -eq 0 ];then
- LOG_MSG "[INFO][$INST_COUNT]:-Adding standby IP address $QE_NAME ${QE_BASE_DIR}/$PG_HBA file" 1
- $TRUSTED_SHELL $QE_NAME "$ECHO host all all ${CIDR_STANDBY_IP} trust >> ${QE_BASE_DIR}/$PG_HBA"
- if [ $? -ne 0 ];then
- $ECHO "FAILED:${QE_LINE}:ADD_IP" >> $PARALLEL_STATUS_FILE
- ERROR_EXIT "Failed to add standby IP address $QE_NAME ${QE_BASE_DIR}/$PG_HBA file" 2
- else
- LOG_MSG "[INFO][$INST_COUNT]:-Added standby IP address $QE_NAME ${QE_BASE_DIR}/$PG_HBA file" 1
- fi
- else
- LOG_MSG "[INFO][$INST_COUNT]:-IP address $QE_NAME ${QE_BASE_DIR}/$PG_HBA already there, no update required" 1
- fi
- done
- LOG_MSG "[INFO][$INST_COUNT]:-End Function $FUNCNAME"
-}
-#******************************************************************************
-# Main Section
-#******************************************************************************
-trap '$ECHO "KILLED:${QE_NAME}:${QE_BASE_DIR}" >> $PARALLEL_STATUS_FILE;ERROR_EXIT "[FATAL]:-[$INST_COUNT]-Recieved INT or TERM signal" 2' INT TERM
-while getopts ":v'?'" opt
- do
- case $opt in
- v ) VERSION_INFO ;;
- '?' ) USAGE ;;
- * ) USAGE
- esac
-done
-#Now process supplied call parameters
-PARENT_PID=$1;shift #PID of gpstate process calling this script
-CHK_CALL
-INST_COUNT=$1;shift #Unique number for this parallel script, starts at 0
-LOG_FILE=$1;shift #Central utility log file
-QE_NAME=$1;shift
-QE_BASE_DIR=$1;shift
-IP_ADDRESS_LIST=$1;shift
-PARALLEL_STATUS_FILE=$1;shift
-QE_LINE=${QE_NAME}:${QE_BASE_DIR}
-LOG_MSG "[INFO][$INST_COUNT]:-Start Main"
-UPDATE_PGHBA
-$ECHO "COMPLETED:${QE_LINE}" >> $PARALLEL_STATUS_FILE
-LOG_MSG "[INFO][$INST_COUNT]:-End Main"
-exit $EXIT_STATUS
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/doc/gpactivatestandby_help
----------------------------------------------------------------------
diff --git a/tools/doc/gpactivatestandby_help b/tools/doc/gpactivatestandby_help
deleted file mode 100755
index 923a640..0000000
--- a/tools/doc/gpactivatestandby_help
+++ /dev/null
@@ -1,160 +0,0 @@
-COMMAND NAME: gpactivatestandby
-
-Activates a standby master host and makes it the active master
-for the Greenplum Database system, or configures a host to be
-the standby master for a Greenplum Database system.
-
-
-*****************************************************
-SYNOPSIS
-*****************************************************
-
-
-gpactivatestandby -d <standby_master_datadir> [-c <new_standby_master>]
-[-f] [-a] [-q] [-l <logfile_directory>]
-
-gpactivatestandby -? | -h | --help
-
-gpactivatestandby --version
-
-*****************************************************
-DESCRIPTION
-*****************************************************
-
-
-The gpactivatestandby script activates a backup master host and
-brings it into operation as the active master instance for a Greenplum
-Database system. The activated standby master effectively becomes the
-Greenplum Database master, accepting client connections on the master port
-(which must be set to the same port number on the master host and the
-backup master host).
-
-You must run this script from the master host you are
-activating, not the failed master host you are disabling. Running this
-script assumes you have a backup master host configured for the system
-(see gpinitstandby).
-
-The script will perform the following steps:
-
-* Stop the synchronization process (gpsyncmaster) on the backup master
-
-* Update the system catalog tables of the backup master using the logs
-
-* Activate the backup master to be the new active master for the system
-
-* (optional) Make the host specified with the -c option the new standby
- master host
-
-* Restart the Greenplum Database system with the new master host
-
-A backup Greenplum master host serves as a warm standby in the event
-of the primary Greenplum master host becoming inoperable. The backup
-master is kept up to date by a transaction log replication process
-(gpsyncmaster), which runs on the backup master host and keeps the
-data between the primary and backup master hosts synchronized.
-
-If the primary master fails, the log replication process is shutdown,
-and the backup master can be activated in its place by using the
-gpactivatestandby script. Upon activation of the backup master, the
-replicated logs are used to reconstruct the state of the Greenplum master
-host at the time of the last successfully committed transaction. To specify
-a new standby master host after making your current standby
-master active, use the -c option.
-
-In order to use gpactivatestandby to activate a new primary master host,
-the master host that was previously serving as the primary master cannot
-be running. The script checks for a postmaster.pid file in the data
-directory of the disabled master host, and if it finds it there, it will
-assume the old master host is still active. In some cases, you may need
-to remove the postmaster.pid file from the disabled master host data
-directory before running gpactivatestandby (for example, if the
-disabled master host process was terminated unexpectedly).
-
-After activating a standby master, run ANALYZE to update the database
-query statistics. For example:
-
-psql dbname -c 'ANALYZE;'
-
-
-*****************************************************
-OPTIONS
-*****************************************************
-
-
--d <standby_master_datadir>
-
-Required. The absolute path of the data directory for the master host
-you are activating.
-
-
--c <new_standby_master_hostname>
-
-Optional. After you activate your standby master you may want to specify
-another host to be the new standby, otherwise your Greenplum Database
-system will no longer have a standby master configured. Use this option
-to specify the hostname of the new standby master host. You can also use
-gpinitstandby at a later time to configure a new standby master host.
-
-
--f (force activation)
-
-Use this option to force activation of the backup master host.
-Only use this option if you are sure that the backup and primary master
-hosts are consistent. This option may be useful if you have just
-initialized a new backup master using gpinitstandby, and want to
-activate it immediately.
-
-
--a (do not prompt)
-
-Do not prompt the user for confirmation.
-
-
--q (no screen output)
-
-Run in quiet mode. Command output is not displayed on the screen,
-but is still written to the log file.
-
-
--l <logfile_directory>
-
-The directory to write the log file. Defaults to ~/gpAdminLogs.
-
-
--? | -h | --help
-
-Displays the online help.
-
-
--v (show script version)
-
-Displays the version, status, last updated date, and check sum of this script.
-
-
---version (show utility version)
-
-Displays the version of this utility.
-
-
-*****************************************************
-EXAMPLES
-*****************************************************
-
-
-Activate the backup master host and make it the active master instance for a
-Greenplum Database system (run from backup master host you are activating):
-
-gpactivatestandby -d /gpdata
-
-
-Activate the backup master host and at the same time configure another
-host to be your new standby master:
-
-gpactivatestandby -d /gpdata -c new_standby_hostname
-
-
-*****************************************************
-SEE ALSO
-*****************************************************
-
-gpinitsystem, gpinitstandby
[3/4] incubator-hawq git commit: HAWQ-39. Remove below unused mgmt
scripts for hawq2.0:
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8ec87e6a/tools/bin/gpinitsystem
----------------------------------------------------------------------
diff --git a/tools/bin/gpinitsystem b/tools/bin/gpinitsystem
deleted file mode 100755
index a990c60..0000000
--- a/tools/bin/gpinitsystem
+++ /dev/null
@@ -1,2403 +0,0 @@
-#!/bin/bash
-# Filename:- gpinitsystem
-# Status:- Released
-# Author:- L Lonergan/G Coombe
-# Contact:- gcoombe@greenplum.com
-# Release date:- March 2006
-# Release stat:- Released
-# Copyright (c) Metapa 2005. All Rights Reserved.
-# Copyright Greenplum database cluster
-#******************************************************************************
-# Update History
-#******************************************************************************
-# Date Who Update
-# 12/05/2006 G Coombe Added a parallel create segment option
-# 4/26/2007 cmcdevitt renamed from gpcreatecluster.sh
-#
-#******************************************************************************
-# Detailed Description
-#******************************************************************************
-#******************************************************************************
-# Prep Code
-
-WORKDIR=`dirname $0`
-
-# Source required functions file, this required for script to run
-# exit if cannot locate this file. Change location of FUNCTIONS variable
-# as required.
-FUNCTIONS=$WORKDIR/lib/gp_bash_functions.sh
-if [ -f $FUNCTIONS ]; then
- . $FUNCTIONS
-else
- echo "[FATAL]:-Cannot source $FUNCTIONS file Script Exits!"
- exit 2
-fi
-
-#******************************************************************************
-# Script Specific Variables
-#******************************************************************************
-# Log file that will record script actions
-CUR_DATE=`$DATE +%Y%m%d`
-FILE_TIME=`$DATE +%H%M%S`
-PROG_NAME=`$BASENAME $0`
-HELP_DOC_NAME=`$ECHO $PROG_NAME|$AWK -F'.' '{print $1}'`_help
-BACKOUT_FILE=$DEFLOGDIR/backout_gpinitsystem_${USER_NAME}_${CUR_DATE}_$FILE_TIME
-# Level of script feedback 0=small 1=verbose
-VERBOSE=1
-INTERACTIVE=1
-MIRRORING=0
-INST_COUNT=0
-# Greenplum database specific parameters
-GP_USER=$USER_NAME
-# System table names
-GP_TBL=gp_id
-GP_CONFIG_TBL=gp_segment_configuration
-PG_SYSTEM_FILESPACE=3052
-GP_FAILOVER_CONFIG_TBL=gp_fault_strategy
-GP_FILESPACE_NAME=dfs_system
-GP_TABLESPACE_NAME=dfs_default
-unset PG_CONF_ADD_FILE
-#unset QD_PRIMARY_ARRAY QE_PRIMARY_ARRAY QE_MIRROR_ARRAY
-EXIT_STATUS=0
-# ED_PG_CONF search text values
-PORT_TXT="#port"
-LOG_STATEMENT_TXT="#log_statement ="
-LISTEN_ADR_TXT="listen_addresses"
-CHKPOINT_SEG_TXT="checkpoint_segments"
-KERBEROS_KEYFILE_TXT="krb_server_keyfile"
-ENABLE_SECURE_FILESYSTEM_TXT="enable_secure_filesystem"
-INIT_STANDBY_PROG=$GPINITSTANDBY
-QE_MIRROR_ARRAY=""
-GP_PASSWD=gparray
-TMP_PG_HBA=/tmp/pg_hba_conf_master.$$
-TMP_FILE=/tmp/cluster_tmp_file.$$
-TMP_HOSTNAME_FILE=/tmp/hostname_test_file.$$
-PARALLEL_STATUS_FILE=/tmp/gpinitsystem_parallel_status_file.$$
-GPCREATESEG=$WORKDIR/lib/gpcreateseg.sh
-ULIMIT_WARN=0
-MIRROR_TYPE=0
-REMOTE_HOST_COUNT=0
-SINGLE_HOST_BATCH_LIMIT=4
-INPUT_CONFIG=""
-OUTPUT_CONFIG=""
-GPHOSTCACHELOOKUP=$WORKDIR/lib/gphostcachelookup.py
-
-#******************************************************************************
-# DCA Specific Variables
-#******************************************************************************
-DCA_VERSION_FILE="/etc/gpdb-appliance-version"
-
-DCA_RESQUEUE_PRIORITY_NAME="gp_resqueue_priority"
-DCA_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_NAME="gp_resqueue_priority_cpucores_per_segment"
-DCA_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_NAME="gp_resqueue_priority_sweeper_interval"
-
-DCA_MASTER_RESQUEUE_PRIORITY_VAL="on"
-DCA_MASTER_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_VAL=24
-DCA_MASTER_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_VAL=1000
-
-DCA_SEGMENT_RESQUEUE_PRIORITY_VAL="on"
-DCA_SEGMENT_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_VAL=4
-DCA_SEGMENT_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_VAL=1000
-
-#******************************************************************************
-# Functions
-#******************************************************************************
-USAGE () {
- if [ -f ${GPDOCDIR}/$HELP_DOC_NAME ] && [ x"" == x"$SCRIPT_USAGE" ];then
- $LESSCMD ${GPDOCDIR}/$HELP_DOC_NAME
- exit 0
- else
- $ECHO
- $ECHO " `basename $0` -c gp_config_file [OPTIONS]"
- $ECHO
- $ECHO " Creates a new HAWQ instance on a Master host and a number of"
- $ECHO " segment instance hosts."
- $ECHO
- $ECHO " General options:"
- $ECHO " -?, display this help message & exit"
- $ECHO " -v, display version information & exit"
- $ECHO
- $ECHO " Logging options:"
- $ECHO " -q, quiet mode, do not log progress to screen [default:- verbose output to screen]"
- $ECHO " -a, don't ask to confirm instance creation [default:- ask]"
- $ECHO " -l, logfile_directory [optional]"
- $ECHO " Alternative logfile directory"
- $ECHO " -D, set log output to debug level, shows all function calls"
- $ECHO
- $ECHO " Configuration options:"
- $ECHO " -c, gp_config_file [mandatory]"
- $ECHO " Supplies all Greenplum configuration information required by this utility."
- $ECHO " Full description of all parameters contained within the example file"
- $ECHO " supplied with this distribution."
- $ECHO " Also see gpinitsystem_INSTRUCTIONS file for greater detail on"
- $ECHO " the operation and configuration of this script"
- $ECHO " -h, gp_hostlist_file [optional]"
- $ECHO " Contains a list of all segment instance hostnames required to participate in"
- $ECHO " the new Greenplum instance. Normally set in gp_config_file."
- $ECHO " -p, postgresql_conf_gp_additions [optional]"
- $ECHO " List of additional PostgreSQL parameters to be applied to each Master/Segment"
- $ECHO " postgresql.conf file during Greenplum database initialization."
- $ECHO " -s, standby_hostname [optional]"
- $ECHO " -i, do not initialize standby sync process [default:- start process]"
- $ECHO " -m, maximum number of connections for master instance [default ${DEFAULT_QD_MAX_CONNECT}]"
- $ECHO " -b, shared_buffers per instance [default $DEFAULT_BUFFERS]"
- $ECHO " Specify either the number of database I/O buffers (without suffix) or the"
- $ECHO " amount of memory to use for buffers (with suffix 'kB', 'MB' or 'GB')."
- $ECHO " Applies to master and all segments."
- $ECHO " -e, <password>, password to set for Greenplum superuser in database [default $GP_PASSWD]"
- $ECHO " -n, <locale>, setting for locale to be set when database initialized [default $DEFAULT_LOCALE_SETTING]"
- $ECHO " -B, <number> run this batch of create segment processes in parallel [default $BATCH_DEFAULT]"
- $ECHO
- $ECHO " Return codes:"
- $ECHO " 0 - No problems encountered with requested operation"
- $ECHO " 1 - Warning generated, but instance operational"
- $ECHO " 2 - Fatal error, instance not created/started, or in an inconsistent state,"
- $ECHO " see log file for failure reason."
- $ECHO
- exit $EXIT_STATUS
- fi
-}
-
-# derive proper options for gpstart/gpstop based on VERBOSE and DEBUG_LEVEL.
-# sample use:
-# GPSTOP_OPTS=$(OUTPUT_LEVEL_OPTS)
-#
-OUTPUT_LEVEL_OPTS () {
- if [ $VERBOSE ]; then
- if [ $DEBUG_LEVEL -eq 1 ]; then
- echo ' -v '
- else
- echo ''
- fi
- else
- echo ' -q '
- fi
-}
-
-# Check whether two intervals ($1, $2), ($3, $4) overlap
-# Returns 0 if the intervals overlap, 1 otherwise
-CHK_OVERLAP() {
- if [[ $1 -ge $3 && $1 -le $4 ]]; then
- return 0;
- elif [[ $3 -ge $1 && $3 -le $2 ]]; then
- return 0;
- else
- return 1;
- fi
-}
-
-CHK_PARAMS () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking configuration parameters, please wait..." 1
- if [ $USER_NAME == "root" ]; then
- ERROR_EXIT "[FATAL]:-Unable to run this script as $USER" 2
- fi
- if [ x"" = x"$GPHOME" ]; then
- LOG_MSG "[FATAL]:-Environment variable \$GPHOME not set" 1
- ERROR_EXIT "[FATAL]:-Unable to continue" 2
- fi
- # Check that we can see initdb
- if [ x"$INITDB" = x"" ] || [ ! -x $INITDB ];then
- ERROR_EXIT "[FATAL]:-Unable to locate initdb" 2
- fi
- # Make sure that script has been supplied a config file
- if [ x"" == x"$CLUSTER_CONFIG" ] && [ x"" == x"$INPUT_CONFIG" ] ; then
- ERROR_EXIT "[FATAL]:-Greenplum configuration filename [-c] option or [-I] option not provided." 2
- fi
-
- if [ x"" != x"$CLUSTER_CONFIG" ] ; then
- # Check that we have a non-zero configuration file
- CHK_FILE $CLUSTER_CONFIG
-
- if [ $EXISTS -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-Configuration file $CLUSTER_CONFIG does not exist." 2
- fi
-
- # Make sure old CLUSTER_CONFIG settings are not hanging around.
- unset PORT_BASE SEG_PREFIX DATA_DIRECTORY REPLICATION_PORT_BASE
-
- # Make sure it is not a dos file with CTRL M at end of each line
- $CAT $CLUSTER_CONFIG|$SED -e 's/^M$//g' > $TMP_FILE
- $MV $TMP_FILE $CLUSTER_CONFIG
- LOG_MSG "[INFO]:-Dumping $CLUSTER_CONFIG to logfile for reference"
- $CAT $CLUSTER_CONFIG|$GREP -v "^#" >> $LOG_FILE
- LOG_MSG "[INFO]:-Completed $CLUSTER_CONFIG dump to logfile"
- # Source the cluster configuration file
- LOG_MSG "[INFO]:-Reading Greenplum configuration file $CLUSTER_CONFIG" 1
- . $CLUSTER_CONFIG
-
- if [ x"" != x"$QD_PRIMARY_ARRAY" ] ; then
- ERROR_EXIT "[FATAL]:-Cannot specify QD_PRIMARY_ARRAY in '-c <config file>'. Only valid with '-I <input file>'" 2
- fi
-
- if [ x"" = x"$PORT_BASE" ]; then
- ERROR_EXIT "[FATAL]:-PORT_BASE not specified in $CLUSTER_CONFIG file, is this the correct instance configuration file." 2
- fi
-
- if [ $MIRROR_PORT_BASE ]; then
- LOG_MSG "[WARNING]:-Should not specify MIRRORING configuration for HAWQ."
- fi
-
- else
- LOG_MSG "[INFO]:-Reading Greenplum input configuration file $INPUT_CONFIG"
- READ_INPUT_CONFIG
- fi
-
- if [ x"" != x"$REQ_LOCALE_SETTING" ];then LOCALE_SETTING=$REQ_LOCALE_SETTING;fi
-
-
- if [ x"" == x"$INPUT_CONFIG" ] ; then
- if [ x"" != x"$MACHINE_LIST_FILE_ALT" ];then
- MACHINE_LIST_FILE=$MACHINE_LIST_FILE_ALT
- fi
- # Now check to see if MACHINE_LIST_FILE is still empty
- if [ x"" == x"$MACHINE_LIST_FILE" ];then
- LOG_MSG "[FATAL]:-MACHINE_LIST_FILE variable is unset, and -h option not supplied" 1
- ERROR_EXIT "[FATAL]:-Unable to continue" 2
- fi
- fi
-
- # Check for required definitions in the CLUSTER_CONFIG file
- if [ x"" = x"$SEG_PREFIX" ]; then
- ERROR_EXIT "[FATAL]:-SEG_PREFIX not specified in $CLUSTER_CONFIG file, is this the correct instance configuration file." 2
- fi
- if [ x"" = x"$DATA_DIRECTORY" ]; then
- ERROR_EXIT "[FATAL]:-DATA_DIRECTORY not specified in $CLUSTER_CONFIG file, is this the correct instance configuration file." 2
- fi
- if [ x"" == x"$LOCALE_SETTING" ];then
- LOG_MSG "[INFO]:-Locale has not been set in $CLUSTER_CONFIG, will set to default value" 1
- LOCALE_SETTING=$DEFAULT_LOCALE_SETTING
- # Now check to see if the system has this locale available
- CHK_LOCALE_KNOWN
- LOG_MSG "[INFO]:-Locale set to $DEFAULT_LOCALE_SETTING" 1
- else
- LOG_MSG "[INFO]:-Locale set to $LOCALE_SETTING"
- CHK_LOCALE_KNOWN
- fi
- LOG_MSG "[INFO]:-Dump current system locale to log file"
- $LOCALE >> $LOG_FILE
- LOG_MSG "[INFO]:-End of system locale dump"
- if [ ! -f $GPCREATESEG ];then
- ERROR_EXIT "[FATAL]:-No $GPCREATESEG exists" 2
- fi
-
- if [ x"" == x"$INPUT_CONFIG" ] ; then
- # Check the other files that are required to be able to continue
- FILE_LIST=($MACHINE_LIST_FILE)
- for FILE in "${FILE_LIST[@]}"
- do
- CHK_FILE $FILE
- if [ $EXISTS -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-Problem with $FILE file" 2
- else
- LOG_MSG "[INFO]:-Completed check of file $FILE"
- fi
- done
- CHK_DUPLICATES
- # Set up the machine list array
- LOG_MSG "[INFO]:-Setting up segment instance list array"
- declare -a MACHINE_LIST=(`$CAT $MACHINE_LIST_FILE|$SORT`)
- fi
- # Now process contents for the configuration file
- if [ ! "$ARRAY_NAME" ]; then
- LOG_MSG "[WARN]:-ARRAY_NAME variable not set, will provide default value" 1
- EXIT_STATUS=1
- ARRAY_NAME="Greenplum Instance"
- fi
- export ARRAY_NAME
-
- if [ x"" = x"$MASTER_HOSTNAME" ]; then
- ERROR_EXIT "[FATAL]:MASTER_HOSTNAME variable not set" 2
- fi
-
- # Make sure that this script is running on the QD host
- if [ $MASTER_HOSTNAME != `$HOSTNAME` ]; then
- LOG_MSG "[WARN]:-Master hostname $MASTER_HOSTNAME does not match hostname output" 1
- LOG_MSG "[INFO]:-Checking to see if $MASTER_HOSTNAME can be resolved on this host" 1
- $TRUSTED_SHELL $MASTER_HOSTNAME "$TOUCH $TMP_HOSTNAME_FILE"
- if [ -f $TMP_HOSTNAME_FILE ];then
- LOG_MSG "[INFO]:-Can resolve $MASTER_HOSTNAME to this host" 1
- $RM -f $TMP_HOSTNAME_FILE
- else
- $TRUSTED_SHELL $MASTER_HOSTNAME "$RM -f $TMP_HOSTNAME_FILE"
- LOG_MSG "[FATAL]:-Master hostname in configuration file is ${MASTER_HOSTNAME}" 1
- LOG_MSG "[FATAL]:-Operating system command returns `$HOSTNAME`" 1
- LOG_MSG "[FATAL]:-Unable to resolve $MASTER_HOSTNAME on this host" 1
- ERROR_EXIT "[FATAL]:-Master hostname in gpinitsystem configuration file must be $MASTER_HOSTNAME" 2
- fi
- fi
-
-
- # Deal with issue of mixed case segment names being created in all lower case on tails but mixed
- # case in head system tables.
- LWR_CASE_SEG_PREFIX=`$ECHO $SEG_PREFIX|$TR '[A-Z]' '[a-z]'`
- SEG_PREFIX=$LWR_CASE_SEG_PREFIX
-
- # MASTER_PORT
- if [ x"" = x"$MASTER_PORT" ]; then
- ERROR_EXIT "[FATAL]:-MASTER_PORT variable not set" 2
- fi
-
- # DATA_DIRECTORY
- ((QE_PRIMARY_COUNT=${#DATA_DIRECTORY[@]}))
- if [ $QE_PRIMARY_COUNT -eq 0 ]; then
- ERROR_EXIT "[FATAL]:-Number of primary directories 0 (zero)" 2
- fi
-
- # MASTER_DIRECTORY
- if [ x"" = x"$MASTER_DIRECTORY" ]; then
- ERROR_EXIT "[FATAL]:-MASTER_DIRECTORY variable not set" 2
- fi
- # Check that we have write access to the proposed master data directory
- W_DIR=$MASTER_DIRECTORY
- LOG_MSG "[INFO]:-Checking write access to $W_DIR on master host"
- $TOUCH ${W_DIR}/tmp_file_test
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Cannot write to $W_DIR on master host " 2
- else
- $RM -f ${W_DIR}/tmp_file_test
- LOG_MSG "[INFO]:-Write test passed $W_DIR directory on master host"
- fi
- # Check that the master segment directory does not exist
- if [ -d ${MASTER_DIRECTORY}/${SEG_PREFIX}-1 ];then
- ERROR_EXIT "[FATAL]:-Master host data directory ${MASTER_DIRECTORY}/${SEG_PREFIX}-1 already exists" 2
- fi
- # DATABASE_PREFIX
- if [ x"" = x"$DATABASE_NAME" ]; then
- LOG_MSG "[INFO]:-No DATABASE_NAME set, will exit following $DEFAULTDB updates" 1
- else
- LOG_MSG "[INFO]:-Will create database $DATABASE_NAME"
- fi
- if [ ! $TRUSTED_SHELL ]; then
- ERROR_EXIT "[FATAL]:-TRUSTED_SHELL variable not set" 2
- fi
- # CHECK_POINT_SEGMENTS
- if [ ! $CHECK_POINT_SEGMENTS ]; then
- LOG_MSG "[WARN]:-CHECK_POINT_SEGMENTS variable not set, will set to default value" 1
- CHECK_POINT_SEGMENTS=$DEFAULT_CHK_PT_SEG
- fi
- # KERBEROS_KEYFILE
- if [ ! $KERBEROS_KEYFILE ]; then
- LOG_MSG "[INFO]:-KERBEROS_KEYFILE variable not set, will set to default value" 1
- KERBEROS_KEYFILE=$DEFAULT_DFS_KERBEROS_KEYFILE
- fi
- # ENABLE_SECURE_FILESYSTEM
- if [ ! $ENABLE_SECURE_FILESYSTEM ]; then
- LOG_MSG "[INFO]:-ENABLE_SECURE_FILESYSTEM variable not set, will set to default value" 1
- ENABLE_SECURE_FILESYSTEM=off
- fi
- # ENCODING
- if [ ! $ENCODING ]; then
- LOG_MSG "[WARN]:-ENCODING variable not set, will set to default UTF-8" 1
- EXIT_STATUS=1
- ENCODING="UTF-8"
- fi
- if [ x"SQL_ASCII" = x"$ENCODING" ]; then
- ERROR_EXIT "[FATAL]:-SQL_ASCII is no longer supported as a server encoding" 2
- fi
- # MASTER_MAX_CONNECT
- if [ ! $MASTER_MAX_CONNECT ];then
- LOG_MSG "[INFO]:-MASTER_MAX_CONNECT not set, will set to default value $DEFAULT_QD_MAX_CONNECT" 1
- MASTER_MAX_CONNECT=$DEFAULT_QD_MAX_CONNECT
- else
- if [ $MASTER_MAX_CONNECT -lt 1 ];then
- ERROR_EXIT "[FATAL]:-MASTER_MAX_CONNECT less than 1" 2
- fi
- fi
- ((QE_MAX_CONNECT=$MASTER_MAX_CONNECT*$QE_CONNECT_FACTOR))
- LOG_MSG "[INFO]:-Setting segment instance MAX_CONNECTIONS to $QD_MAX_CONNECT"
- if [ x"" == x"$NEW_BUFFERS" ];then
- MASTER_SHARED_BUFFERS=$DEFAULT_BUFFERS
- QE_SHARED_BUFFERS=$DEFAULT_BUFFERS
- else
- # removed code that forced at least 1000 buffers -kh 3/14/07
- MASTER_SHARED_BUFFERS=$NEW_BUFFERS
- QE_SHARED_BUFFERS=$NEW_BUFFERS
- LOG_MSG "[INFO]:-Set shared buffers to $NEW_BUFFERS"
- fi
- # Number of QE hosts in this configuration
- ((NUM_QES=${#MACHINE_LIST[*]}))
- if [ $NUM_QES -eq 0 ]; then
- ERROR_EXIT "[FATAL]:-Number of Segment instances's 0 (zero)" 2
- else
- LOG_MSG "[INFO]:-Number of segment instance hosts = $NUM_QES"
- if [ $NUM_QES -eq 1 ];then
- # This is a single host array, re-tune the BATCH_DEFAULT to 4
- if [ $BATCH_DEFAULT -gt $SINGLE_HOST_BATCH_LIMIT ];then
- LOG_MSG "[INFO]:-Detected a single host GPDB array build, reducing value of BATCH_DEFAULT from $BATCH_DEFAULT to $SINGLE_HOST_BATCH_LIMIT" 1
- BATCH_DEFAULT=$SINGLE_HOST_BATCH_LIMIT
- fi
- fi
- fi
-
- # Temp directory
- ((NUM_TEMP_DIR=${#TEMP_DIRECTORY[*]}))
- TEMP_DIRECTORY_LIST=(`$ECHO ${TEMP_DIRECTORY[@]} | $TR ' ' '\n' | $SORT -u | $TR '\n' ' '`)
- ((NUM_DEDUP_TEMP_DIR=${#TEMP_DIRECTORY_LIST[*]}))
- if [ $NUM_TEMP_DIR -eq 0 ]; then
- LOG_MSG "[INFO]:-Use the default temp directory" 1
- elif [ "$NUM_TEMP_DIR" -ne "$NUM_DEDUP_TEMP_DIR" ]; then
- LOG_MSG "[INFO]:-Use the user specified temp directory" 1
- LOG_MSG "[WARN]:-One or more temp directories are same" 1
- else
- LOG_MSG "[INFO]:-Use the user specified temp directory" 1
- fi
-
- # Mirror configuration
- if [ $MIRRORING -eq 1 ]; then
- ((NUM_MIRROR_DIRECTORY=${#MIRROR_DATA_DIRECTORY[@]}))
- if [ x"" == x"$INPUT_CONFIG" ] ; then
- if [ $NUM_MIRROR_DIRECTORY -ne $QE_PRIMARY_COUNT ]; then
- ERROR_EXIT "[FATAL]:-Number of primary directories does not match number of mirror directories" 2
- fi
- fi
-
- for dir in "${DATA_DIRECTORY[@]}"
- do
- for mirrordir in "${MIRROR_DATA_DIRECTORY[@]}"
- do
- if [ $dir == $mirrordir ] ; then
- ERROR_EXIT "[FATAL]:-Conflict between data directory and mirror data directory ($dir)." 2
- fi
- done
- done
- fi
-
- # Check open files value
- MASTER_OPEN=`ulimit -n`
- if [ $MASTER_OPEN -lt $OS_OPENFILES ];then
- LOG_MSG "[WARN]:-Master open file limit is $MASTER_OPEN should be >= $OS_OPENFILES" 1
- EXIT_STATUS=1
- ULIMIT_WARN=1
- fi
- # Get IP address of the master host
-
- MASTER_IP_ADDRESS_ALL=(`$IFCONFIG $IFCONFIG_TXT |$GREP "inet "|$GREP -v "127.0.0"|$AWK '{print $2}'|$CUT -d: -f2`)
- ERROR_CHK $? "obtain IP address of Master host" 2
- MASTER_IPV6_LOCAL_ADDRESS_ALL=(`$IPV6_ADDR_LIST_CMD |$GREP inet6|$AWK '{print $2}' |$CUT -d'/' -f1`)
- MASTER_IP_ADDRESS=(`$ECHO ${MASTER_IP_ADDRESS_ALL[@]} ${MASTER_IPV6_LOCAL_ADDRESS_ALL[@]}|$TR ' ' '\n'|$SORT -u|$TR '\n' ' '`)
- LOG_MSG "[INFO]:-Master IP address array = ${MASTER_IP_ADDRESS[@]}"
- if [ x"" != x"$STANDBY_HOSTNAME" ];then
- STANDBY_IP_ADDRESS_ALL=(`$TRUSTED_SHELL $STANDBY_HOSTNAME "$IFCONFIG $IFCONFIG_TXT |$GREP \"inet \"|$GREP -v \"127.0.0\"|$AWK '{print \\$2}'|$CUT -d: -f2" 2>>$LOG_FILE`)
- STANDBY_IPV6_ADDRESS_ALL=(`$TRUSTED_SHELL $STANDBY_HOSTNAME "$IPV6_ADDR_LIST_CMD |$GREP inet6|$AWK '{print \\$2}' |$CUT -d'/' -f1" 2>>$LOG_FILE`)
- STANDBY_IP_ADDRESS=(`$ECHO ${STANDBY_IP_ADDRESS_ALL[@]} ${STANDBY_IPV6_ADDRESS_ALL[@]}|$TR ' ' '\n'|$SORT -u|$TR '\n' ' '`)
- ERROR_CHK $? "obtain IP address of standby Master host" 2
- LOG_MSG "[INFO]:-Standby IP address array = ${STANDBY_IP_ADDRESS[@]}"
- #Check open files value
- STANDBY_MASTER_OPEN=`$TRUSTED_SHELL $STANDBY_HOSTNAME "ulimit -n"`
- if [ $STANDBY_MASTER_OPEN -lt $OS_OPENFILES ];then
- LOG_MSG "[WARN]:-Standby Master open file limit is $STANDBY_MASTER_OPEN should be >= $OS_OPENFILES" 1
- EXIT_STATUS=1
- ULIMIT_WARN=1
- fi
- fi
-
- # Validate that the different locale settings are available of the system
- # Note: This check is performed on the master only. There is an assumption
- # being made that the locales available on the master are available on the
- # segment hosts.
- if [ x"" != x"$REQ_LOCALE_SETTING" ]; then
- IN_ARRAY $REQ_LOCALE_SETTING "`locale -a`"
- if [ $? -eq 0 ]; then
- ERROR_EXIT "[FATAL]-Value $REQ_LOCALE_SETTING is not a valid value for --locale on this system." 2
- fi
- fi
- if [ x"" != x"$LCCOLLATE" ]; then
- IN_ARRAY $LCCOLLATE "`locale -a`"
- if [ $? -eq 0 ]; then
- ERROR_EXIT "[FATAL]-Value $LCCOLLATE is not a valid value for --lc-collate on this system." 2
- fi
- fi
- if [ x"" != x"$LCCTYPE" ]; then
- IN_ARRAY $LCCTYPE "`locale -a`"
- if [ $? -eq 0 ]; then
- ERROR_EXIT "[FATAL]-Value $LCCTYPE is not a valid value for --lc-ctype on this system." 2
- fi
- fi
- if [ x"" != x"$LCMESSAGES" ]; then
- IN_ARRAY $LCMESSAGES "`locale -a`"
- if [ $? -eq 0 ]; then
- ERROR_EXIT "[FATAL]-Value $LCMESSAGES is not a valid value for --lc-messages on this system." 2
- fi
- fi
-
- if [ x"" != x"$LCMONETARY" ]; then
- IN_ARRAY $LCMONETARY "`locale -a`"
- if [ $? -eq 0 ]; then
- ERROR_EXIT "[FATAL]-Value $LCMONETARY is not a valid value for --lc-monetary on this system." 2
- fi
- fi
-
- if [ x"" != x"$LCNUMERIC" ]; then
- IN_ARRAY $LCNUMERIC "`locale -a`"
- if [ $? -eq 0 ]; then
- ERROR_EXIT "[FATAL]-Value $LCNUMERIC is not a valid value for --lc-numeric on this system." 2
- fi
- fi
-
- if [ x"" != x"$LCTIME" ]; then
- IN_ARRAY $LCTIME "`locale -a`"
- if [ $? -eq 0 ]; then
- ERROR_EXIT "[FATAL]-Value $LCTIME is not a valid value for --lc-time on this system." 2
- fi
- fi
- LOG_MSG "[INFO]:-Checking configuration parameters, Completed" 1
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-#*****************************************************
-# Hdfs Check Error Code
-#*****************************************************
-
-GPCHKHDFS_ERR=1
-CONNECT_ERR=100
-OPENFILE_ERR=101
-WRITEFILE_ERR=102
-FLUSH_ERR=103
-DELETE_ERR=104
-DFSDIR_ERR=105
-GETTOKEN_ERR=106
-KRBLOGIN_ERR=107
-CHK_HDFS() {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking Hdfs is available,please wait..." 1
- $GP_CHECK_HDFS $DFS_NAME $DFS_URL $ENABLE_SECURE_FILESYSTEM $KERBEROS_KEYFILE
- RESULT=$?
- if [ $RESULT -ne 0 ] ;then
- LOG_MSG "[FATAL]:-gpcheckhdfs Command: 'gpcheckhdfs $DFS_NAME $DFS_URL $ENABLE_SECURE_FILESYSTEM $KERBEROS_KEYFILE'" 1
- LOG_MSG "[FATAL]:-There may be some problems in Hdfs Or Krb Authorized..Please Check Error($RESULT).." 1
- else
- LOG_MSG "[INFO]:-Checking Hdfs Successfully" 1
- fi
-
- if [ $RESULT -eq $GPCHKHDFS_ERR ] ;then
- LOG_MSG "[FATAL]:-Command: $GP_CHECK_HDFS $DFS_NAME $DFS_URL $ENABLE_SECURE_FILESYSTEM $KERBEROS_KEYFILE" 1
- ERROR_EXIT "[FATAL]:Unable to check hdfs" 2
- elif [ $RESULT -eq $CONNECT_ERR ] ;then
- LOG_MSG "[FATAL]:-Failed to Connected to $DFS_NAME://$DFS_URL" 1
- ERROR_EXIT "[FATAL]:Please Check your hostname or port" 2
- elif [ $RESULT -eq $OPENFILE_ERR ] ;then
- ERROR_EXIT "[FATAL]:-Failed to Open or Write a File" 2
- elif [ $RESULT -eq $WRITEFILE_ERR ] ;then
- ERROR_EXIT "[FATAL]:-Failed to Write File to Hdfs" 2
- elif [ $RESULT -eq $FLUSH_ERR ] ;then
- ERROR_EXIT "[FATAL]:-Failed to Flush,Please check your Datanode" 2
- elif [ $RESULT -eq $DELETE_ERR ] ;then
- ERROR_EXIT "[FATAL]:-Failed to Delete a File..." 2
- elif [ $RESULT -eq $DFSDIR_ERR ] ;then
- ERROR_EXIT "[FATAL]:-DFS Directory Error" 2
- elif [ $RESULT -eq $GETTOKEN_ERR ] ;then
- ERROR_EXIT "[FATAL]:-Get Delegation Token Error" 2
- elif [ $RESULT -eq $KRBLOGIN_ERR ] ;then
- ERROR_EXIT "[FATAL]:-Failed to login to Kerberos,Please check your keytab file or kdc" 2
- fi
-}
-
-CHK_MULTI_HOME () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Commencing multi-home checks, please wait..." 1
-
- # For MPP-12043 we need a way to sort that we can't get with the sort command
- # so we'll use a little command line python script.
-
-read -d '' HOSTNAME_SORTER <<"END_PYTHON_CODE"
-import sys, re
-
-standard_host_re = re.compile(r'([a-zA-Z]+)((\\d+)(-(\\d+))?)')
-
-def standard_host_cmp(h1, h2):
- m1 = standard_host_re.match(h1)
- m2 = standard_host_re.match(h2)
-
- if not m1 or not m2:
- if h1 == h2:
- return 0
- elif h1 < h2:
- return -1
- else:
- return 1
- else:
- if m1.group(1) <= m2.group(1):
- if not int(m1.group(3)) == int(m2.group(3)):
- return int(m1.group(3)) - int(m2.group(3))
- elif m1.group(5):
- return int(m1.group(5)) - int(m2.group(5))
- else:
- return 0
- else:
- return 1
-
-hl = sys.stdin.readlines()
-for h in sorted(hl,cmp=standard_host_cmp):
- print h,
-END_PYTHON_CODE
-
- MACHINE_LIST=(`$CAT $MACHINE_LIST_FILE|$PYTHON -c "$HOSTNAME_SORTER"`)
- M_HOST_ARRAY=()
- MCOUNT=0
- for MHOST in ${MACHINE_LIST[@]}
- do
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
- PING_HOST $MHOST
- SEG_HOSTNAME=`$TRUSTED_SHELL $MHOST "$HOSTNAME"`
- if [ $? -ne 0 ];then
- LOG_MSG "[FATAL]:-Remote command to host $MHOST failed to get value of hostname"
- $ECHO "[FATAL]:-Remote command to host $MHOST failed to get value of hostname"
- LOG_MSG "[FATAL]:-Check to see that you have setup trusted remote ssh on all hosts"
- $ECHO "[FATAL]:-Check to see that you have setup trusted remote ssh on all hosts"
-
- ERROR_EXIT "[FATAL]:-Unable to get hostname output for $MHOST" 2
- fi
- if [ `$ECHO ${T_SEG_ARRAY[@]}|$TR ' ' '\n'|$GREP -c "^${SEG_HOSTNAME}$"` -eq 0 ];then
- T_SEG_ARRAY=(${T_SEG_ARRAY[@]} $SEG_HOSTNAME)
- fi
- T_HOST_ARRAY=(${T_HOST_ARRAY[@]} ${MHOST}~$SEG_HOSTNAME)
- ((MCOUNT=$MCOUNT+1))
- done
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
- # Now sort the array to ensure that all SEG_HOSTNAME values together
- for S_HOST in ${T_SEG_ARRAY[@]}
- do
- M_HOST_ARRAY=(${M_HOST_ARRAY[@]} `$ECHO ${T_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP "~${S_HOST}$"|$TR '\n' ' '`)
- done
- . $CLUSTER_CONFIG
- NUM_DATADIR=${#DATA_DIRECTORY[@]}
- if [ `$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$AWK -F"~" '{print $2}'|$SORT -u|$WC -l` -ne $MCOUNT ];then
- LOG_MSG "[INFO]:-Configuring build for multi-home array" 1
- MULTI_HOME=1
- # Now make sure that we have same number of unique hostnames as there are data directories declared
- HOST1=`$ECHO ${M_HOST_ARRAY[0]}|$AWK -F"~" '{print $2}'`
- NUM_MHOST_NODE=`$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP "~${HOST1}$"|$WC -l`
-
- ((REMAINDER=$NUM_DATADIR % $NUM_MHOST_NODE))
- ((MULTIPLE=$NUM_DATADIR / $NUM_MHOST_NODE))
- if [ $REMAINDER -ne 0 ] || [ $MULTIPLE -eq 0 ] ;then
- LOG_MSG "[FATAL]:-Inconsistency between number of multi-home hostnames and number of segments per host" 1
- LOG_MSG "[INFO]:-Have $NUM_DATADIR data directories and $NUM_MHOST_NODE multi-home hostnames for each host" 1
- LOG_MSG "[INFO]:-For multi-home configuration, number of segment instance data directories per host must be multiple of" 1
- LOG_MSG "[INFO]:-the number of multi-home hostnames within the GPDB array" 1
- ERROR_EXIT "[FATAL]:-Unable to continue" 2
- fi
-
- if [ $MULTIPLE -gt 1 ] ; then
- # Now need to increase the size of M_HOME_ARRAY to fake a multi-home array
- HOME_COUNT=1
- FAKE_M_HOST_ARRAY=(${M_HOST_ARRAY[@]})
- while [ $HOME_COUNT -lt $MULTIPLE ]
- do
- FAKE_M_HOST_ARRAY=(${FAKE_M_HOST_ARRAY[@]} ${M_HOST_ARRAY[@]})
- ((HOME_COUNT=$HOME_COUNT+1))
- done
- M_HOST_ARRAY=()
- for S_HOST in ${T_SEG_ARRAY[@]}
- do
- M_HOST_ARRAY=(${M_HOST_ARRAY[@]} `$ECHO ${FAKE_M_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP "~${S_HOST}$"| $SORT | $TR '\n' ' '`)
- done
- fi
- else
- LOG_MSG "[INFO]:-Configuring build for standard array" 1
- MULTI_HOME=0
- # Now need to increase the size of M_HOME_ARRAY to fake a multi-home array
- HOME_COUNT=1
- FAKE_M_HOST_ARRAY=(${M_HOST_ARRAY[@]})
- while [ $HOME_COUNT -lt $NUM_DATADIR ]
- do
- FAKE_M_HOST_ARRAY=(${FAKE_M_HOST_ARRAY[@]} ${M_HOST_ARRAY[@]})
- ((HOME_COUNT=$HOME_COUNT+1))
- done
- M_HOST_ARRAY=(`$ECHO ${FAKE_M_HOST_ARRAY[@]}|$TR ' ' '\n'|$SORT |$TR '\n' ' '`)
- fi
- NUM_SEP_HOSTS=`$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$AWK -F"~" '{print $2}'|$SORT -u|$WC -l`
- if [ $MIRRORING -eq 1 ] && [ $MIRROR_TYPE -eq 1 ];then
- # Mirroring is on and spread mirror configuration has been requested, make sure sufficient hosts
- if [ $NUM_SEP_HOSTS -eq 1 ] || [ $NUM_SEP_HOSTS -le $NUM_DATADIR ];then
- LOG_MSG "[FATAL]:-Request made for spread mirroring via -S option, but insufficient hosts available" 1
- LOG_MSG "[INFO]:-Number of separate hosts must be greater than number of segment instances per host" 1
- ERROR_EXIT "[FATAL]:-Unable to continue" 2
- else
- LOG_MSG "[INFO]:-Sufficient hosts for spread mirroring request" 1
- fi
- fi
- if [ $MIRRORING -eq 0 ] && [ $MIRROR_TYPE -eq 1 ];then
- LOG_MSG "[WARN]:-Option -S supplied, but no mirrors have been defined, ignoring -S option" 1
- MIRROR_TYPE=0
- EXIT_STATUS=1
- fi
- LOG_MSG "[INFO]:-Commencing multi-home checks, Completed" 1
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-
-CHK_LOCALE_KNOWN () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- if [ $# -eq 0 ];then
- if [ `$LOCALE -a |$GREP -ic $LOCALE_SETTING` -eq 0 ];then
- LOG_MSG "[INFO]:-Master host available locale values"
- $LOCALE -a >> $LOG_FILE
- LOG_MSG "[FATAL]:-Unable to locate locale value $LOCALE_SETTING on this host" 1
- LOG_MSG "[INFO]:-Select another locale value via -n option to this utility" 1
- LOG_MSG "[INFO]:-Avaliable locale values have been dumped to the log file" 1
- ERROR_EXIT "[FATAL]:-Unable to continue" 2
- else
- LOG_MSG "[INFO]:-Locale check passed on this host"
- fi
- else
- # Hostname has been passed so check remote locale value
- if [ `$TRUSTED_SHELL $1 "$LOCALE -a|$GREP -ic '$LOCALE_SETTING'"` -eq 0 ];then
- LOG_MSG "[INFO]:-Host $1 available locale values"
- `$TRUSTED_SHELL $1 "$LOCALE -a"` >> $LOG_FILE
- LOG_MSG "[FATAL]:-Unable to locate locale value $LOCALE_SETTING on $1" 1
- LOG_MSG "[INFO]:-Select another locale value via -n option to this utility" 1
- LOG_MSG "[INFO]:-Avaliable locale values have been dumped to the log file" 1
- ERROR_EXIT "[FATAL]:-Unable to continue" 2
- else
- LOG_MSG "[INFO]:-Locale check passed on $1"
- fi
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CHK_DUPLICATES () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- if [ `$ECHO | $CAT $MACHINE_LIST_FILE - |$GREP -v $"^$"|$WC -l` -ne `$CAT $MACHINE_LIST_FILE|$GREP -v "^$"|$SORT -u|$WC -l` ]
- then
- ERROR_EXIT "[FATAL]:-Duplicate segment instance hostname exists in $MACHINE_LIST_FILE" 2
- fi
- LOG_MSG "[INFO]:-No duplicate segment instance hostnames found, will proceed"
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CHK_OPEN_FILES () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking $1"
- OPEN_VALUE=`$TRUSTED_SHELL $1 "ulimit -n"`
- if [ $OPEN_VALUE -lt $OS_OPENFILES ];then
- LOG_MSG "[WARN]:-Host $1 open files limit is $OPEN_VALUE should be >= $OS_OPENFILES" 1
- EXIT_STATUS=1
- ULIMIT_WARN=1
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-# HAWQ needs DFS_NAME set.
-CHK_DFS_NAME () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking DFS name"
- if [ x"" == x"$DFS_NAME" ]; then
- ERROR_EXIT "[FATAL]:-Issue with DFS_NAME: not set" 2
- fi
-
- name=$(echo $DFS_NAME | awk '{print toupper($0)}')
- if [ x"$name" != x"HDFS" ]; then
- ERROR_EXIT "[FATAL]:-Issue with DFS_NAME: only \"HDFS\" is supported" 2
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-# HAWQ needs dfs url.
-CHK_DFS_URL () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking DFS url"
-
- if [ x"" == x"$DFS_URL" ]; then
- ERROR_EXIT "[FATAL]:-Issue with DFS_URL: not set" 2
- fi
-
- # Don't need the protocol part in the URL.
- if `$ECHO "$DFS_URL" | $GREP -q '^[a-zA-Z]*://'`; then
- ERROR_EXIT "[FATAL]:-Issue with DFS_URL: protocol in the dfs url should not be specified" 2
- fi
-
- dfs_host=`$ECHO "$DFS_URL" | $SED 's,[:/].*,,'`
- # do not ping host because it may be configured using namenode HA
- #PING_HOST "$dfs_host"
-
- # We must create the filespace in a relative path
- #DFS_URL="$DFS_URL"/hawq_data
- #export DFS_URL
-
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-# HAWQ supports multiple disks for pg_temp.
-CHK_TEMP_DIR () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking temp directories" 1
-
- ((num=${#TEMP_DIRECTORY_LIST[*]}))
- # Remove and warn the duplicate temp directories.
- for dir in "${TEMP_DIRECTORY_LIST[@]}"
- do
- # If there is no STANDBY_HOSTNAME, it will not check it here.
- for machine in "${MACHINE_LIST[@]}" "$MASTER_HOSTNAME" $STANDBY_HOSTNAME
- do
- LOG_MSG "[INFO]:-Check directory: $dir on machine $machine" 1
- CHK_DIR "$dir" "$machine"
- CHK_WRITE_DIR "$dir" "$machine"
- done
- done
-
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CHK_QES () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking Master host" 1
- SET_VAR $QD_PRIMARY_ARRAY
- CHK_DIR $GP_DIR
- if [ $EXISTS -eq 0 ]; then
- ERROR_EXIT "[FATAL]:-Master directory $GP_DIR already exists" 2
- fi
- GET_PG_PID_ACTIVE $GP_PORT
- if [ $PID -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Found indication of postmaster process on port $GP_PORT on Master host" 2
- fi
- LOG_MSG "[INFO]:-Checking new segment hosts, please wait..." 1
-
- CHECK_LIST=(`$ECHO ${MACHINE_LIST[@]} | $TR ' ' '\n' | $GREP -v "${HOSTNAME}\$" | $SORT | $TR '\n' ' '`)
- for QE_ID in ${CHECK_LIST[@]}
- do
- CHK_LOCALE_KNOWN $QE_ID
- CHK_OPEN_FILES $GP_HOSTADDRESS
- POSTGRES_VERSION_CHK $GP_HOSTADDRESS
- if [ $VERSION_MATCH -ne 1 ] ; then
- ERROR_EXIT "Postgres version does not match" 2
- fi
-
- # Check to ensure have required QE directories
- for DIR in "${DATA_DIRECTORY[@]}"
- do
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
- LOG_MSG "[INFO]:-Checking segment instance $QE_ID directory $DIR"
- CHK_DIR $DIR $QE_ID
- if [ $EXISTS -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-No $DIR on segment instance $QE_ID" 2
- else
- LOG_MSG "[INFO]:-$QE_ID $DIR checked"
- fi
- done
- # Check that we have a GP_LIBRARY_PATH directory on the QE
- CHK_DIR $GP_LIBRARY_PATH $QE_ID
- if [ $EXISTS -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-No $GP_LIBRARY_PATH on segment instance $QE_ID" 2
- else
- LOG_MSG "[INFO]:-Segment instance $QE_ID $GP_LIBRARY_PATH checked"
- fi
- done
-
- # Check to ensure that instance directories do not exist on hosts
- LOG_MSG "[INFO]:-Primary segment instance directory check"
- for I in "${QE_PRIMARY_ARRAY[@]}"
- do
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
- SET_VAR $I
- LOG_MSG "[INFO]:-Checking $GP_HOSTADDRESS for dir $GP_DIR"
- CHK_DIR $GP_DIR $GP_HOSTADDRESS
- if [ $EXISTS -eq 0 ]; then
- ERROR_EXIT "[FATAL]:-Instance directory $GP_DIR exists on segment instance $GP_HOSTADDRESS" 2
- fi
- # Check that the hostname is not associated with local host
- LOG_MSG "[INFO]:-Checking $GP_HOSTADDRESS $HOSTFILE for localhost set as $GP_HOSTADDRESS"
- LOCAL_COUNT=`$TRUSTED_SHELL $GP_HOSTADDRESS "$GREP $GP_HOSTADDRESS $HOSTFILE|$GREP -c localhost"`
- if [ $LOCAL_COUNT -ne 0 ];then
- LOG_MSG "[WARN]:-----------------------------------------------------------" 1
- LOG_MSG "[WARN]:-Host $GP_HOSTADDRESS is assigned as localhost in $HOSTFILE" 1
- LOG_MSG "[WARN]:-This will cause segment->master communication failures" 1
- LOG_MSG "[WARN]:-Remove $GP_HOSTADDRESS from local host line in /etc/hosts" 1
- LOG_MSG "[WARN]:-----------------------------------------------------------" 1
- EXIT_STATUS=1
- fi
- # Check that we can write to the QE directory
- W_DIR=`$DIRNAME $GP_DIR`
- LOG_MSG "[INFO]:-Checking write access to $W_DIR on $GP_HOSTADDRESS"
- $TRUSTED_SHELL $GP_HOSTADDRESS "$TOUCH ${W_DIR}/tmp_file_test"
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Cannot write to $W_DIR on $GP_HOSTADDRESS " 2
- else
- $TRUSTED_SHELL $GP_HOSTADDRESS "$RM -f ${W_DIR}/tmp_file_test"
- LOG_MSG "[INFO]:-Write test passed on $GP_HOSTADDRESS $W_DIR directory"
- fi
-
- done
- # Check for mirror directories if mirroring configured
- if [ $MIRRORING -ne 0 ]; then
- LOG_MSG "[INFO]:-Mirror segment instance directory check"
- for I in "${QE_MIRROR_ARRAY[@]}"
- do
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
- SET_VAR $I
- LOG_MSG "[INFO]:-Checking $GP_HOSTADDRESS for dir $GP_DIR"
- CHK_DIR $GP_DIR $GP_HOSTADDRESS
- if [ $EXISTS -eq 0 ]; then
- ERROR_EXIT "[FATAL]:-Instance directory $GP_DIR exists on segment instance $GP_HOSTADDRESS" 2
- fi
- # Check that we can write to the QE directory
- W_DIR=`$DIRNAME $GP_DIR`
- LOG_MSG "[INFO]:-Checking write access to $W_DIR on $GP_HOSTADDRESS"
- $TRUSTED_SHELL $GP_HOSTADDRESS "$TOUCH ${W_DIR}/tmp_file_test"
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Cannot write to $W_DIR on $GP_HOSTADDRESS " 2
- else
- $TRUSTED_SHELL $GP_HOSTADDRESS "$RM -f ${W_DIR}/tmp_file_test"
- LOG_MSG "[INFO]:-Write test passed on $GP_HOSTADDRESS $W_DIR directory"
- fi
- done
- else
- LOG_MSG "[INFO]:-Mirror segment instance directory check skipped, mirroring=Off"
- fi
-
- if [ x"" != x"$STANDBY_HOSTNAME" ];then
- PING_HOST $STANDBY_HOSTNAME
- CHK_LOCALE_KNOWN $STANDBY_HOSTNAME
- CHK_OPEN_FILES $GP_HOSTADDRESS
- # Check standby host directory
- CHK_DIR $MASTER_DIRECTORY $STANDBY_HOSTNAME
- if [ $EXISTS -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Standby host directory $MASTER_DIRECTORY, does not exist" 2
- fi
- fi
-
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
-
- LOG_MSG "[INFO]:-Checking new segment hosts, Completed" 1
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CHK_SEG () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- SET_VAR $1
-
- # We need QE_ID to be able to re-use some functions.
- QE_ID=$GP_HOSTADDRESS
- CHK_LOCALE_KNOWN $QE_ID
- CHK_OPEN_FILES $GP_HOSTADDRESS
- POSTGRES_VERSION_CHK $GP_HOSTADDRESS
- if [ $VERSION_MATCH -ne 1 ] ; then
- ERROR_EXIT "Postgres version does not match" 2
- fi
-
- # since this is from the input config file we only check our one datadir
- #CHK_DIR $GP_DIR $QE_ID
- #if [ $EXISTS -ne 0 ]; then
- # ERROR_EXIT "[FATAL]:-No $GP_DIR on segment instance $QE_ID" 2
- #else
- # LOG_MSG "[INFO]:-$QE_ID $GP_DIR checked"
- #fi
-
- #make sure we can write to it.
- # Check that we can write to the QE directory
- W_DIR=`$DIRNAME $GP_DIR`
- LOG_MSG "[INFO]:-Checking write access to $W_DIR on $GP_HOSTADDRESS"
- $TRUSTED_SHELL $GP_HOSTADDRESS "$TOUCH ${W_DIR}/tmp_file_test"
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Cannot write to $W_DIR on $GP_HOSTADDRESS " 2
- else
- $TRUSTED_SHELL $GP_HOSTADDRESS "$RM -f ${W_DIR}/tmp_file_test"
- LOG_MSG "[INFO]:-Write test passed on $GP_HOSTADDRESS $W_DIR directory"
- fi
-
-
- # Check that we have a GP_LIBRARY_PATH directory on the QE
- CHK_DIR $GP_LIBRARY_PATH $GP_HOSTADDRESS
- if [ $EXISTS -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-No $GP_LIBRARY_PATH on segment $GP_HOSTADDRESS:$GP_DIR" 2
- else
- LOG_MSG "[INFO]:-Segment instance $GP_HOSTADDRESS:$GP_DIR $GP_LIBRARY_PATH checked"
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-
-}
-
-CHK_QES_FROM_INPUTFILE () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Checking Master host" 1
- SET_VAR $QD_PRIMARY_ARRAY
- CHK_DIR $GP_DIR
- if [ $EXISTS -eq 0 ]; then
- ERROR_EXIT "[FATAL]:-Master directory $GP_DIR already exists" 2
- fi
- GET_PG_PID_ACTIVE $GP_PORT
- if [ $PID -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Found indication of postmaster process on port $GP_PORT on Master host" 2
- fi
- LOG_MSG "[INFO]:-Checking new segment hosts, please wait..." 1
-
- for QE in ${QE_PRIMARY_ARRAY[@]}
- do
- CHK_SEG $QE
- done
-
- for QE in ${QE_MIRROR_ARRAY[@]}
- do
- CHK_SEG $QE
- done
-
- if [ x"" != x"$STANDBY_HOSTNAME" ];then
- PING_HOST $STANDBY_HOSTNAME
- CHK_LOCALE_KNOWN $STANDBY_HOSTNAME
- CHK_OPEN_FILES $GP_HOSTADDRESS
- #Check standby host directory
- CHK_DIR $MASTER_DIRECTORY $STANDBY_HOSTNAME
- if [ $EXISTS -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Standby host directory $MASTER_DIRECTORY, does not exist" 2
- fi
- fi
-
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
- LOG_MSG "[INFO]:-Checking new segment hosts, Completed" 1
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-POSTGRES_PORT_CHK () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- GET_PG_PID_ACTIVE $1 $2
- if [ x"$PID" != x0 ];then
- ERROR_EXIT "[FATAL]:-Host $2 has an active database process on port = $1" 2
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-
-CREATE_QE_ARRAY () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Building primary segment instance array, please wait..." 1
- #Set up initial variables
- . $CLUSTER_CONFIG
- if [ x"" != x"$REQ_LOCALE_SETTING" ];then LOCALE_SETTING=$REQ_LOCALE_SETTING;fi
- if [ `$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP -c "~${MASTER_HOSTNAME}\$"` -gt 0 ]; then
- MASTER_LIST=(`$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP "~${MASTER_HOSTNAME}\$"`)
- M_HOST_ARRAY=(${MASTER_LIST[@]} `$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP -v "~${MASTER_HOSTNAME}\$"`)
- fi
- if [ $MIRRORING -eq 1 ] && [ $MIRROR_TYPE -eq 0 ];then
- # Move first host to other end of the array
- # Get first hostname
- HOST_MOVE=`$ECHO ${M_HOST_ARRAY[0]}|$AWK -F"~" '{print $2}'`
- M_MIR_HOST_ARRAY=(`$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP -v "~${HOST_MOVE}"|$TR '\n' ' '` `$ECHO ${M_HOST_ARRAY[@]}|$TR ' ' '\n'|$GREP "~${HOST_MOVE}"|$TR '\n' ' '`)
- fi
- DBID_COUNT=1
- CONTENT_COUNT=-1
- PORT_COUNT=0
- LAST_HOST=X
- SEG_DIR_VECTOR=0
- MIR_COUNT=-1
- QD_PRIMARY_ARRAY=${MASTER_HOSTNAME}~${MASTER_PORT}~${MASTER_DIRECTORY}/${SEG_PREFIX}${CONTENT_COUNT}~${DBID_COUNT}~${CONTENT_COUNT}~0
- ((DBID_COUNT=$DBID_COUNT+1));((CONTENT_COUNT=$CONTENT_COUNT+1))
- for QE_PAIR in ${M_HOST_ARRAY[@]}
- do
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
- QE_NAME=`$ECHO $QE_PAIR|$AWK -F"~" '{print $1}'`
- QE_HOST=`$ECHO $QE_PAIR|$AWK -F"~" '{print $2}'`
- if [ $LAST_HOST == $QE_HOST ];then
- ((PORT_COUNT=$PORT_COUNT+1))
- else
- SEG_DIR_VECTOR=0
- PORT_COUNT=0
- fi
- ((GP_PORT=$PORT_BASE+$PORT_COUNT))
-
- if [ x"" == x"$REPLICATION_PORT_BASE" ];then
- REPL_PORT=0
- else
- ((REPL_PORT=$REPLICATION_PORT_BASE+$PORT_COUNT))
- fi
- GP_DIR=${DATA_DIRECTORY[$SEG_DIR_VECTOR]}
- QE_PRIMARY_ARRAY=(${QE_PRIMARY_ARRAY[@]} ${QE_NAME}~${GP_PORT}~${GP_DIR}/${SEG_PREFIX}${CONTENT_COUNT}~${DBID_COUNT}~$CONTENT_COUNT~${REPL_PORT})
- POSTGRES_PORT_CHK $GP_PORT $QE_NAME
- ((DBID_COUNT=$DBID_COUNT+1))
- ((CONTENT_COUNT=$CONTENT_COUNT+1))
- ((SEG_DIR_VECTOR=$SEG_DIR_VECTOR+1))
- LAST_HOST=$QE_HOST
- done
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $ECHO;fi
- if [ $MIRRORING -eq 1 ] ;then
- ((MIRROR_OFFSET=$MIRROR_PORT_BASE-$PORT_BASE))
- ((MIRROR_REPLICATION_PORT_OFFSET=$MIRROR_REPLICATION_PORT_BASE-$REPLICATION_PORT_BASE))
- if [ $MIRROR_TYPE -eq 1 ];then
- CREATE_SPREAD_MIRROR_ARRAY
- else
- CREATE_GROUP_MIRROR_ARRAY
- fi
- fi
- ((TOTAL_SEG=${#QE_PRIMARY_ARRAY[@]}))
- # Now re-order the array so that it is ordered by PORT to spread out the number of parallel processes initiated on each host
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-ARRAY_REORDER() {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
-
- # Now re-order the array so that it is ordered by PORT to spread out the
- # number of parallel processes initiated on each host
- #
- # MPP-13617: If segment contains a ~, we assume ~ is the field delimiter.
- # Otherwise we assume : is the delimiter. This allows us to easily
- # handle IPv6 addresses which may contain a : by using a ~ as a delimiter.
- #
- case `$ECHO ${QE_PRIMARY_ARRAY[@]}` in
- *~*)
- S="~"
- ;;
- *)
- S=":"
- ;;
- esac
-
- QE_REORDER_ARRAY=(`$ECHO ${QE_PRIMARY_ARRAY[@]}|$TR ' ' '\n'|$SORT -t$S -k2,2|$TR '\n' ' '`)
- QE_PRIMARY_ARRAY=(${QE_REORDER_ARRAY[@]})
- if [ $MIRROR_TYPE -eq 1 ];then
- QE_REORDER_ARRAY=(`$ECHO ${QE_MIRROR_ARRAY[@]}|$TR ' ' '\n'|$SORT -t$S -k2,2|$TR '\n' ' '`)
- QE_MIRROR_ARRAY=(${QE_REORDER_ARRAY[@]})
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-DISPLAY_CONFIG () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- if [ x"" == x"$INPUT_CONFIG" ] ; then
- . $CLUSTER_CONFIG
- if [ x"" != x"$REQ_LOCALE_SETTING" ];then
- LOCALE_SETTING=$REQ_LOCALE_SETTING;
- fi
- if [ x"" != x"$MACHINE_LIST_FILE_ALT" ];then
- MACHINE_LIST_FILE=$MACHINE_LIST_FILE_ALT
- fi
-
- if [ `$CAT ${MACHINE_LIST_FILE}| $GREP -c "${MASTER_HOSTNAME}\$"` -eq 0 ]; then
- declare -a MACHINE_LIST=(`$CAT $MACHINE_LIST_FILE`)
- else
- declare -a MACHINE_LIST=($MASTER_HOSTNAME `$CAT $MACHINE_LIST_FILE|$GREP -v "${MASTER_HOSTNAME}\$"`)
- fi
- fi
- ((NUM_QES=${#MACHINE_LIST[*]}))
- if [ x"" == x"$PG_CONF_ADD_FILE" ]; then
- PG_ADD=Off
- else
- PG_ADD=On
- fi
- export QD_DIR=$GP_DIR
- if [ x"" != x"$INTERACTIVE" ];then
- SET_VAR $QD_PRIMARY_ARRAY
- LOG_MSG "[INFO]:-HAWQ Creation Parameters" 1
- LOG_MSG "[INFO]:---------------------------------------" 1
- LOG_MSG "[INFO]:-Master Configuration" 1
- LOG_MSG "[INFO]:---------------------------------------" 1
- LOG_MSG "[INFO]:-Master instance name = $ARRAY_NAME" 1
- LOG_MSG "[INFO]:-Master hostname = $GP_HOSTADDRESS" 1
- LOG_MSG "[INFO]:-Master port = $MASTER_PORT" 1
- LOG_MSG "[INFO]:-Master instance dir = $GP_DIR" 1
- LOG_MSG "[INFO]:-Master LOCALE = $LOCALE_SETTING" 1
- LOG_MSG "[INFO]:-Greenplum segment prefix = $SEG_PREFIX" 1
- LOG_MSG "[INFO]:-Master Database = $DATABASE_NAME" 1
- LOG_MSG "[INFO]:-Master connections = $MASTER_MAX_CONNECT" 1
- LOG_MSG "[INFO]:-Master buffers = $MASTER_SHARED_BUFFERS" 1
- LOG_MSG "[INFO]:-Segment connections = $QE_MAX_CONNECT" 1
- LOG_MSG "[INFO]:-Segment buffers = $QE_SHARED_BUFFERS" 1
- LOG_MSG "[INFO]:-Checkpoint segments = $CHECK_POINT_SEGMENTS" 1
- LOG_MSG "[INFO]:-Kerberos key files = $KERBEROS_KEYFILE" 1
- LOG_MSG "[INFO]:-Enable secure filesystem = $ENABLE_SECURE_FILESYSTEM" 1
- LOG_MSG "[INFO]:-Encoding = $ENCODING" 1
- LOG_MSG "[INFO]:-Postgres param file = $PG_ADD" 1
- LOG_MSG "[INFO]:-Initdb to be used = $INITDB" 1
- LOG_MSG "[INFO]:-GP_LIBRARY_PATH is = $GP_LIBRARY_PATH" 1
- LOG_MSG "[INFO]:-DFS name is = $DFS_NAME" 1
- LOG_MSG "[INFO]:-DFS url is = $DFS_URL" 1
- if [ $ULIMIT_WARN -eq 1 ];then
- LOG_MSG "[WARN]:-Ulimit check = Warnings generated, see log file $WARN_MARK" 1
- else
- LOG_MSG "[INFO]:-Ulimit check = Passed" 1
- fi
- if [ $MULTI_HOME -eq 0 ];then
- LOG_MSG "[INFO]:-Array host connect type = Single hostname per node" 1
- else
- LOG_MSG "[INFO]:-Array host connect type = Multi hostname per node" 1
- fi
- IP_COUNT=1
- for MASTER_IP in "${MASTER_IP_ADDRESS[@]}"
- do
- LOG_MSG "[INFO]:-Master IP address [$IP_COUNT] = $MASTER_IP" 1
- ((IP_COUNT=$IP_COUNT+1))
- done
-
- if [ x"" != x"$STANDBY_HOSTNAME" ];then
- LOG_MSG "[INFO]:-Standby Master = $STANDBY_HOSTNAME" 1
- else
- LOG_MSG "[INFO]:-Standby Master = Not Configured" 1
- fi
- LOG_MSG "[INFO]:-Primary segment # = $QE_PRIMARY_COUNT" 1
- if [ x"" != x"$STANDBY_HOSTNAME" ];then
- for STANDBY_IP in "${STANDBY_IP_ADDRESS[@]}"
- do
- LOG_MSG "[INFO]:-Standby IP address = $STANDBY_IP" 1
- done
- fi
- LOG_MSG "[INFO]:-Total Database segments = $TOTAL_SEG" 1
- LOG_MSG "[INFO]:-Trusted shell = $TRUSTED_SHELL" 1
- ((NUM_QES=${#MACHINE_LIST[*]}))
- LOG_MSG "[INFO]:-Number segment hosts = $NUM_QES" 1
- LOG_MSG "[INFO]:----------------------------------------" 1
- LOG_MSG "[INFO]:-Greenplum Primary Segment Configuration" 1
- LOG_MSG "[INFO]:----------------------------------------" 1
- for I in "${QE_PRIMARY_ARRAY[@]}"
- do
- if [ $MIRRORING -ne 0 ]; then
- TXT=`$ECHO $I|$AWK -F"~" '{print $1" \t"$3" \t"$2" \t"$4" \t"$5" \t"$6}'`
- else
- TXT=`$ECHO $I|$AWK -F"~" '{print $1" \t"$3" \t"$2" \t"$4" \t"$5}'`
- fi
- LOG_MSG "[INFO]:-$TXT" 1
- done
- if [ $MIRRORING -ne 0 ]; then
- LOG_MSG "[INFO]:---------------------------------------" 1
- LOG_MSG "[INFO]:-Greenplum Mirror Segment Configuration" 1
- LOG_MSG "[INFO]:---------------------------------------" 1
- for I in "${QE_MIRROR_ARRAY[@]}"
- do
- TXT=`$ECHO $I|$AWK -F"~" '{print $1" \t"$3" \t"$2" \t"$4" \t"$5" \t"$6}'`
- LOG_MSG "[INFO]:-$TXT" 1
- done
- fi
-
- GET_REPLY "Continue with Greenplum creation"
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-SET_VAR () {
- #
- # MPP-13617: If segment contains a ~, we assume ~ is the field delimiter.
- # Otherwise we assume : is the delimiter. This allows us to easily
- # handle IPv6 addresses which may contain a : by using a ~ as a delimiter.
- #
- I=$1
- case $I in
- *~*)
- S="~"
- ;;
- *)
- S=":"
- ;;
- esac
- GP_HOSTADDRESS=`$ECHO $I|$CUT -d$S -f1`
- GP_PORT=`$ECHO $I|$CUT -d$S -f2`
- GP_DIR=`$ECHO $I|$CUT -d$S -f3`
- GP_DBID=`$ECHO $I|$CUT -d$S -f4`
- GP_CONTENT=`$ECHO $I|$CUT -d$S -f5`
- GP_REPLICATION_PORT=`$ECHO $I|$CUT -d$S -f6`
-}
-
-CREATE_QD_DB () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Building the Master instance database, please wait..." 1
- SET_VAR $QD_PRIMARY_ARRAY
- LOG_MSG "[INFO]:-Initializing Master Postgres instance $GP_DIR"
- $EXPORT_LIB_PATH
-
- #
- # note: is_filerep_mirrored is no because the QD is not mirrored by filerep right now (standby master works differently)
- #
-
- if [ x"" != x"$LCCOLLATE" ]; then
- LC_COLLATE_SETTING="--lc-collate=$LCCOLLATE"
- fi
-
- if [ x"" != x"$LCCTYPE" ]; then
- LC_CTYPE_SETTING="--lc-ctype=$LCCTYPE"
- fi
-
- if [ x"" != x"$LCMESSAGES" ]; then
- LC_MESSAGES_SETTING="--lc-messages=$LCMESSAGES"
- fi
-
- if [ x"" != x"$LCMONETARY" ]; then
- LC_MONETARY_SETTING="--lc-monetary=$LCMONETARY"
- fi
-
- if [ x"" != x"$LCNUMERIC" ]; then
- LC_NUMERIC_SETTING="--lc-numeric=$LCNUMERIC"
- fi
-
- if [ x"" != x"$LCTIME" ]; then
- LC_TIME_SETTING="--lc-time=$LCTIME"
- fi
-
- LC_ALL_SETTINGS=" $LC_COLLATE_SETTING $LC_CTYPE_SETTING $LC_MESSAGES_SETTING $LC_MONETARY_SETTING $LC_NUMERIC_SETTING $LC_TIME_SETTING"
-
-
- # build initdb command, capturing output in ${GP_DIR}.initdb
- cmd="$INITDB"
- cmd="$cmd -E $ENCODING"
- cmd="$cmd -D $GP_DIR"
- cmd="$cmd --locale=$LOCALE_SETTING"
- cmd="$cmd $LC_ALL_SETTINGS"
- cmd="$cmd --max_connections=$MASTER_MAX_CONNECT"
- cmd="$cmd --shared_buffers=$MASTER_SHARED_BUFFERS"
- cmd="$cmd --backend_output=$GP_DIR.initdb"
-
- LOG_MSG "[INFO]:-Commencing local $cmd"
- $cmd >> $LOG_FILE 2>&1
- RETVAL=$?
-
- # if there was an error, copy ${GP_DIR}.initdb to the log
- if [ $RETVAL -ne 0 ]; then
- $CAT ${GP_DIR}.initdb >> $LOG_FILE
- fi
- $RM -f ${GP_DIR}.initdb
- if [ $RETVAL -ne 0 ]; then
- ERROR_EXIT "[FATAL]:- Command $cmd failed with error status $RETVAL, see log file $LOG_FILE for more detail" 2
- fi
-
- BACKOUT_COMMAND "if [ -d $GP_DIR ]; then $RM -Rf $GP_DIR; fi"
- BACKOUT_COMMAND "$ECHO Removing Master data directory files"
- LOG_MSG "[INFO]:-Completed Master instance initialization"
- $ECHO "#Greenplum specific configuration parameters for Master instance database" >> ${GP_DIR}/$PG_CONF
- $ECHO "#------------------------------------------------------------------------" >> ${GP_DIR}/$PG_CONF
- LOG_MSG "[INFO]:-Setting the Master port to $GP_PORT"
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$PORT_TXT" port=$GP_PORT 0
- ERROR_CHK $? "set Master port=$GP_PORT in $PG_CONF" 2
- LOG_MSG "[INFO]:-Completed setting the Master port to $MASTER_PORT"
- LOG_MSG "[INFO]:-Setting Master logging option"
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$LOG_STATEMENT_TXT" log_statement=all 0
- ERROR_CHK $? "set log_statement=all in ${GP_DIR}/$PG_CONF" 1
- LOG_MSG "[INFO]:-Setting Master instance check point segments"
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$CHKPOINT_SEG_TXT" "checkpoint_segments=$CHECK_POINT_SEGMENTS" 0
- ERROR_CHK $? "set checkpoint_segments=$CHECK_POINT_SEGMENTS in ${GP_DIR}/$PG_CONF" 1
-
- if [ x"" != x"$KERBEROS_KEYFILE" ] ; then
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$KERBEROS_KEYFILE_TXT" "krb_server_keyfile='$KERBEROS_KEYFILE'" 0
- ERROR_CHK $? "set krb_server_keyfile=$KERBEROS_KEYFILE in ${GP_DIR}/$PG_CONF" 1
- fi
- if [ xoff != x"$ENABLE_SECURE_FILESYSTEM" ] ; then
- ED_PG_CONF ${GP_DIR}/$PG_CONF "$ENABLE_SECURE_FILESYSTEM_TXT" "enable_secure_filesystem=$ENABLE_SECURE_FILESYSTEM" 0
- ERROR_CHK $? "set enable_secure_filesystem=$ENABLE_SECURE_FILESYSTEM in ${GP_DIR}/$PG_CONF" 1
- fi
- if [ x"" != x"$PG_CONF_ADD_FILE" ]; then
- LOG_MSG "[INFO]:-Processing additional configuration parameters"
- for NEW_PARAM in `$CAT $PG_CONF_ADD_FILE|$TR -s ' '|$TR -d ' '|$GREP -v "^#"`
- do
- LOG_MSG "[INFO]:-Adding config $NEW_PARAM to Master"
- SEARCH_TXT=`$ECHO $NEW_PARAM |$CUT -d"=" -f1`
- ED_PG_CONF ${GP_DIR}/$PG_CONF $SEARCH_TXT $NEW_PARAM 0
- ERROR_CHK $? "set $NEW_PARAM ${GP_DIR}/$PG_CONF" 1
- done
- fi
- LOG_MSG "[INFO]:-Adding gp_dumpall access to $PG_HBA for master host"
- BUILD_MASTER_PG_HBA_FILE $GP_DIR
- LOG_MSG "[INFO]:-Creating perfmon directories and configuration file"
- BUILD_PERFMON $GP_DIR
- ERROR_CHK $? "create perfmon directories and configuration file" 1
-
- LOG_MSG "[INFO]:-Creating temporary directories for master"
- for DIR in "${TEMP_DIRECTORY_LIST[@]}"
- do
- temp_master_dir="${DIR}/${SEG_PREFIX}-1"
- LOG_MSG "[INFO]:-create temporary ${tmp_seg_dir}"
- BACKOUT_COMMAND "$ECHO \"Remove temporary directory: ${temp_master_dir}\""
- BACKOUT_COMMAND "$RM -rf ${temp_master_dir}"
- $MKDIR $temp_master_dir >> $LOG_FILE 2>&1
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-Could not create directory ${temp_master_dir}. Please check your configuration." 2
- fi
- $ECHO "${temp_master_dir}" >> "${GP_DIR}/${GP_TEMP_DIRECTORIES_FILE}"
- done
-
- LOG_MSG "[INFO]:-Starting the Master in admin mode" 1
- export PGPORT=$GP_PORT;$PG_CTL -w -l $GP_DIR/pg_log/startup.log -D $GP_DIR -o "-i -p $GP_PORT -c gp_role=utility -M master -b -1 -C -1 -z 0 -m" start >> /dev/null 2>&1
- RET_TEXT="`$PG_CTL status -D $GP_DIR`"
- RUNNING=`$ECHO $RET_TEXT|$EGREP -c "not running|neither"`
- if [ $RUNNING -ne 0 ]; then
- $CAT ${GP_DIR}.log|$TEE -a $LOG_FILE
- ERROR_EXIT "[FATAL]:-Failed to start the Master database in admin mode" 2
- fi
- BACKOUT_COMMAND "$RM -f /tmp/.s.PGSQL.${GP_PORT}*"
- BACKOUT_COMMAND "$ECHO \"Removing Master lock files\""
- BACKOUT_COMMAND "$RM -f ${GP_DIR}.log"
- BACKOUT_COMMAND "$ECHO Removing Master log file"
- BACKOUT_COMMAND "if [ -d $GP_DIR ]; then $EXPORT_LIB_PATH;export PGPORT=$GP_PORT; $PG_CTL -D $GP_DIR stop; fi"
- BACKOUT_COMMAND "$ECHO \"Stopping Master instance\""
- LOG_MSG "[INFO]:-Completed starting the Master in admin mode"
- $SLEEP 2
- GP_HOSTNAME=`HOST_LOOKUP $GP_HOSTADDRESS`
- if [ x"$GP_HOSTNAME" = x"__lookup_of_hostname_failed__" ]; then
- ERROR_EXIT "[FATAL]:-Hostname lookup for host $GP_HOSTADDRESS failed." 2
- fi
- PING_HOST $GP_HOSTNAME
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-Could not establish connection to hostname $GP_HOSTNAME. Please check your configuration." 2
- fi
- UPDATE_GPCONFIG $GP_PORT $GP_DBID $GP_CONTENT $GP_HOSTNAME $GP_HOSTADDRESS $GP_PORT $GP_DIR p $GP_REPLICATION_PORT
- LOAD_QE_SYSTEM_DATA $DEFAULTDB
- SET_VAR $QD_PRIMARY_ARRAY
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-# returns hostname for given address or "__lookup_of_hostname_failed__" if hostname not found
-HOST_LOOKUP() {
- res=`echo $1 | $GPHOSTCACHELOOKUP`
- err_index=`echo $res | awk '{print index($0,"__lookup_of_hostname_failed__")}'`
- if [ $err_index -ne 0 ]; then
- echo "__lookup_of_hostname_failed__"
- else
- echo $res
- fi
-}
-
-UPDATE_GPCONFIG () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- MASTER_PORT=$1
- U_DBID=$2
- U_CONTENT=$3
- U_HOSTNAME=$4
- U_ADDRESS=$5
- U_PORT=$6
- U_DIR=$7
- U_ROLE=$8
- if [ $9 -eq 0 ]; then
- U_REPLICATION_PORT=null
- else
- U_REPLICATION_PORT=$9
- fi
-
- if [ $MIRRORING -ne 1 ]; then
- U_REPLICATION_PORT=null
- fi
-
- U_DB=$DEFAULTDB
- CHK_COUNT=`env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$U_DB" -A -t -c "SELECT count(*) FROM $GP_CONFIG_TBL WHERE content=${U_CONTENT} AND preferred_role='${U_ROLE}';" 2>/dev/null` >> $LOG_FILE 2>&1
- ERROR_CHK $? "obtain psql count Master $GP_CONFIG_TBL" 2
- if [ $CHK_COUNT -eq 0 ]; then
- LOG_MSG "[INFO]:-Adding $U_CONTENT on $U_HOSTNAME $U_DIR to system configuration table"
- env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$U_DB" -c "INSERT INTO $GP_CONFIG_TBL (dbid, content, role, preferred_role, mode, status, hostname, address, port, replication_port) VALUES (${U_DBID}, ${U_CONTENT}, '${U_ROLE}', '${U_ROLE}', 's', 'u', '${U_HOSTNAME}', '${U_ADDRESS}', ${U_PORT}, ${U_REPLICATION_PORT});" >> $LOG_FILE 2>&1
- ERROR_CHK $? "add $U_CONTENT on $U_HOSTNAME to Master gp_segment_configuration" 2
-
- LOG_MSG "[INFO]:-Adding $U_CONTENT on $U_HOSTNAME $U_DIR to Master gp_filespace_entry"
- env PGOPTIONS="-c gp_session_role=utility" $PSQL -p $MASTER_PORT -d "$U_DB" -c "insert into pg_filespace_entry (fsefsoid, fsedbid, fselocation) values (${PG_SYSTEM_FILESPACE}, ${U_DBID}, '${U_DIR}');" >> $LOG_FILE 2>&1
- ERROR_CHK $? "add $U_CONTENT on $U_HOSTNAME $U_DIR to Master pg_filespace_entry" 2
- else
- LOG_MSG "[INFO]:-Content $U_CONTENT already exists in gp_segment_configuration system table"
- fi
- if [ $U_CONTENT -eq -1 ]
- then
- MAKE_CONTENTNUM_FILE $CONTENT_COUNT $U_DIR
- MAKE_DBID_FILE $U_DBID $U_HOSTNAME $U_DIR
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-
-LOAD_QE_SYSTEM_DATA () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- SET_VAR $QD_PRIMARY_ARRAY
- TARGET_DB=$1
- QD_DBNAME=$DEFAULTDB
- for I in "${QE_PRIMARY_ARRAY[@]}"
- do
- SET_VAR $I
- LOG_MSG "[INFO]:-Adding segment $GP_HOSTADDRESS to Master system tables"
- GP_HOSTNAME=`HOST_LOOKUP $GP_HOSTADDRESS`
- if [ x"$GP_HOSTNAME" = x"__lookup_of_hostname_failed__" ]; then
- ERROR_EXIT "[FATAL]:-Hostname lookup for host $GP_HOSTADDRESS failed." 2
- fi
- PING_HOST $GP_HOSTNAME
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-Could not establish connection to hostname $GP_HOSTNAME. Please check your configuration." 2
- fi
- UPDATE_GPCONFIG $MASTER_PORT $GP_DBID $GP_CONTENT $GP_HOSTNAME $GP_HOSTADDRESS $GP_PORT $GP_DIR p $GP_REPLICATION_PORT
- LOG_MSG "[INFO]:-Successfully added segment $GP_HOSTADDRESS to Master system tables"
- done
- if [ $MIRRORING -eq 1 ]; then
- for I in "${QE_MIRROR_ARRAY[@]}"
- do
- SET_VAR $I
- LOG_MSG "[INFO]:-Adding segment $GP_HOSTADDRESS, $GP_DBID, $GP_DIR to Master system tables as mirror"
- GP_HOSTNAME=`HOST_LOOKUP $GP_HOSTADDRESS`
- if [ x"$GP_HOSTNAME" = x"__lookup_of_hostname_failed__" ]; then
- ERROR_EXIT "[FATAL]:-Hostname lookup for host $GP_HOSTADDRESS failed." 2
- fi
- PING_HOST $GP_HOSTNAME
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-Could not establish connection to hostname $GP_HOSTNAME. Please check your configuration." 2
- fi
- UPDATE_GPCONFIG $MASTER_PORT $GP_DBID $GP_CONTENT $GP_HOSTNAME $GP_HOSTADDRESS $GP_PORT $GP_DIR m $GP_REPLICATION_PORT
- LOG_MSG "[INFO]:-Successfully added segment $GP_HOSTADDRESS to Master system tables as mirror"
- done
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CREATE_QES_PRIMARY () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- #
- # PARALLEL BUILD OF PRIMARIES
- #
- LOG_MSG "[INFO]:-Commencing parallel build of primary segment instances" 1
- BATCH_LIMIT=${#QE_PRIMARY_ARRAY[@]}
- PARALLEL_SETUP $PARALLEL_STATUS_FILE
- for I in "${QE_PRIMARY_ARRAY[@]}"
- do
- export PG_CONF_ADD_FILE
- export STANDBY_HOSTNAME
- export ENCODING
- export LOCALE_SETTING
- export LC_ALL_SETTINGS
- export BACKOUT_FILE
- export LOG_FILE
- export PARALLEL_STATUS_FILE
- export TOTAL_SEG
- export ARRAY_NAME
- export CHECK_POINT_SEGMENTS
- export KERBEROS_KEYFILE
- export ENABLE_SECURE_FILESYSTEM
- export QE_MAX_CONNECT
- export QE_SHARED_BUFFERS
- export SEG_PREFIX
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
- FLAG=""
- if [ x"" != x"$PG_CONF_ADD_FILE" ] ; then
- FLAG="-p $PG_CONF_ADD_FILE"
- fi
-
- HAS_MIRRORS_OPTION=
- if [ $MIRRORING -ne 0 ]; then
- HAS_MIRRORS_OPTION=yes
- else
- HAS_MIRRORS_OPTION=no
- fi
-
- # create temp directory with SEG_PREFIX.
- TEMP_DIRECTORY_COMPACT_LIST=(`$ECHO ${TEMP_DIRECTORY_LIST[@]} | $TR ' ' ','`)
-
- $GPCREATESEG $FLAG $$ 1 $I IS_PRIMARY "$TEMP_DIRECTORY_COMPACT_LIST" $SEG_PREFIX $HAS_MIRRORS_OPTION $INST_COUNT $LOG_FILE \
-`$ECHO ${MASTER_IP_ADDRESS[@]}|$TR ' ' '~'` \
-`$ECHO ${STANDBY_IP_ADDRESS[@]}|$TR ' ' '~'` &
- PARALLEL_COUNT $BATCH_LIMIT $BATCH_DEFAULT
- done
- PARALLEL_SUMMARY_STATUS_REPORT $PARALLEL_STATUS_FILE
- if [ $REPORT_FAIL -ne 0 ];then
- $CAT $PARALLEL_STATUS_FILE >> $LOG_FILE
- fi
- $RM -f $PARALLEL_STATUS_FILE
-
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CREATE_QES_MIRROR () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Commencing parallel build of mirror segment instances" 1
- BATCH_LIMIT=${#QE_MIRROR_ARRAY[@]}
-
- # MPP-13617: If segment contains a ~, we assume ~ is the field delimiter.
- # Otherwise we assume : is the delimiter. This allows us to easily
- # handle IPv6 addresses which may contain a : by using a ~ as a delimiter.
- #
- case `$ECHO ${QE_PRIMARY_ARRAY[@]}` in
- *~*)
- S="~"
- ;;
- *)
- S=":"
- ;;
- esac
-
- REORDERING=(`$ECHO ${QE_PRIMARY_ARRAY[@]}|$TR ' ' '\n'|$SORT -t$S -k5,5|$TR '\n' ' '`)
- PRIMARIES_ORDERED_BY_CONTENT_ID=(${REORDERING[@]})
-
- REORDERING=(`$ECHO ${QE_MIRROR_ARRAY[@]}|$TR ' ' '\n'|$SORT -t$S -k5,5|$TR '\n' ' '`)
- MIRRORS_ORDERED_BY_CONTENT_ID=(${REORDERING[@]})
-
- PARALLEL_SETUP $PARALLEL_STATUS_FILE
- for ((I_INDEX=0; I_INDEX < $CONTENT_COUNT; I_INDEX++))
- do
- export PG_CONF_ADD_FILE
- export STANDBY_HOSTNAME
- export ENCODING
- export LOCALE_SETTING
- export BACKOUT_FILE
- export LOG_FILE
- export PARALLEL_STATUS_FILE
- export TOTAL_SEG
- export ARRAY_NAME
- export QE_MAX_CONNECT
- export QE_SHARED_BUFFERS
- export CHECK_POINT_SEGMENTS
- export KERBEROS_KEYFILE
- export ENABLE_SECURE_FILESYSTEM
- export SEG_PREFIX
- if [ $DEBUG_LEVEL -eq 0 ] && [ x"" != x"$VERBOSE" ];then $NOLINE_ECHO ".\c";fi
- FLAG=""
- if [ x"" != x"$PG_CONF_ADD_FILE" ] ; then
- FLAG="-p $PG_CONF_ADD_FILE"
- fi
-
- $GPCREATESEG $FLAG $$ 1 ${MIRRORS_ORDERED_BY_CONTENT_ID[$I_INDEX]} ${PRIMARIES_ORDERED_BY_CONTENT_ID[$I_INDEX]} no $INST_COUNT $LOG_FILE \
-`$ECHO ${MASTER_IP_ADDRESS[@]}|$TR ' ' '~'` \
-`$ECHO ${STANDBY_IP_ADDRESS[@]}|$TR ' ' '~'` &
- PARALLEL_COUNT $BATCH_LIMIT $BATCH_DEFAULT
- done
- PARALLEL_SUMMARY_STATUS_REPORT $PARALLEL_STATUS_FILE
- if [ $REPORT_FAIL -ne 0 ];then
- $CAT $PARALLEL_STATUS_FILE >> $LOG_FILE
- fi
- $RM -f $PARALLEL_STATUS_FILE
-
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-
-STOP_QD_PRODUCTION () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Restarting the Greenplum instance in production mode" 1
- if [ -f $GPSTOP ]; then
- GPSTOP_OPTS=$(OUTPUT_LEVEL_OPTS)
- export MASTER_DATA_DIRECTORY=${MASTER_DIRECTORY}/${SEG_PREFIX}-1
- $GPSTOP -a -i -m -d $MASTER_DATA_DIRECTORY $GPSTOP_OPTS
- RETVAL=$?
- case $RETVAL in
- 0 ) LOG_MSG "[INFO]:-Successfully shutdown the new Greenplum instance" ;;
- 1 ) LOG_MSG "[WARN]:-Non fatal error from Greenplum instance shutdown, check log files, will continue"
- EXIT_STATUS=1 ;;
- * ) ERROR_EXIT "[FATAL]:-Failed to stop new Greenplum instance, check log file" 2
- esac
- else
- ERROR_EXIT "[FATAL]:-$GPSTOP not located" 2
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CREATE_STANDBY_QD () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- if [ ! -f $INIT_STANDBY_PROG ]; then
- LOG_MSG "[WARN]:-Unable to locate $INIT_STANDBY_PROG, hence unable to initialize standby master $STANDBY_HOSTNAME"
- EXIT_STATUS=1
- else
- LOG_MSG "[INFO]:-Starting initialization of standby master $STANDBY_HOSTNAME" 1
- export MASTER_DATA_DIRECTORY=${MASTER_DIRECTORY}/${SEG_PREFIX}-1;$INIT_STANDBY_PROG -s $STANDBY_HOSTNAME -a
- RETCODE=$?
- case $RETCODE in
- 0) LOG_MSG "[INFO]:-Successfully completed standby master initialization" 1 ;;
- 1) LOG_MSG "[WARN]:-Non-fatal issue with standby master initialization, check gpstate -f output" 1 ;;
- *) ERROR_EXIT "[FATAL]:-Initialization of standby master failed"
- esac
- BACKOUT_COMMAND "$TRUSTED_SHELL ${STANDBY_HOSTNAME} \"$RM -Rf ${QD_DIR}\""
- BACKOUT_COMMAND "$ECHO \"Removing standby directory ${QD_DIR} on $STANDBY_HOSTNAME\""
- BACKOUT_COMMAND "$TRUSTED_SHELL $STANDBY_HOSTNAME \"if [ -d $GP_DIR ]; then ${EXPORT_LIB_PATH};export PGPORT=${MASTER_PORT}; $PG_CTL -w -D ${MASTER_DIRECTORY}/${SEG_PREFIX}-1 -o \"-i -p ${MASTER_PORT}\" -m immediate stop; fi\""
- BACKOUT_COMMAND "$ECHO \"Stopping standby instance on $STANDBY_HOSTNAME\""
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-START_QD_PRODUCTION () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- if [ -f $GPSTART ]; then
- GPSTART_OPTS=$(OUTPUT_LEVEL_OPTS)
- export MASTER_DATA_DIRECTORY=${MASTER_DIRECTORY}/${SEG_PREFIX}-1
- $GPSTART -a -d $MASTER_DATA_DIRECTORY $GPSTART_OPTS
-
- if [ $? -eq 0 ];then
- LOG_MSG "[INFO]:-Successfully started new Greenplum instance"
- else
-
- # this text is duplicated below
- LOG_MSG "[WARN]:" 1
- LOG_MSG "[WARN]:-Failed to start Greenplum instance; review gpstart output to" 1
- LOG_MSG "[WARN]:- determine why gpstart failed and reinitialize cluster after resolving" 1
- LOG_MSG "[WARN]:- issues. Not all initialization tasks have completed so the cluster" 1
- LOG_MSG "[WARN]:- should not be used." 1
-
- LOG_MSG "[WARN]:-gpinitsystem will now try to stop the cluster" 1
- LOG_MSG "[WARN]:" 1
- if [ -f $GPSTOP ]; then
- GPSTOP_OPTS=$(OUTPUT_LEVEL_OPTS)
- export MASTER_DATA_DIRECTORY=${MASTER_DIRECTORY}/${SEG_PREFIX}-1
- $GPSTOP -a -i -d $MASTER_DATA_DIRECTORY $GPSTOP_OPTS
-
- RETVAL=$?
- case $RETVAL in
- 0 ) LOG_MSG "[INFO]:-Successfully shutdown the Greenplum instance" 1;;
- 1 ) LOG_MSG "[WARN]:-Non fatal error from Greenplum instance shutdown" 1;;
- * ) ERROR_EXIT "[WARN]:-Failed to stop new Greenplum instance" 2
- esac
- else
- LOG_MSG "[WARN]:-$GPSTOP not located" 1
- fi
-
- # this text is duplicated above
- LOG_MSG "[WARN]:" 1
- LOG_MSG "[WARN]:-Failed to start Greenplum instance; review gpstart output to" 1
- LOG_MSG "[WARN]:- determine why gpstart failed and reinitialize cluster after resolving" 1
- LOG_MSG "[WARN]:- issues. Not all initialization tasks have completed so the cluster" 1
- LOG_MSG "[WARN]:- should not be used." 1
- LOG_MSG "[WARN]:" 1
- ERROR_EXIT "[FATAL]: starting new instance failed;" 2
- fi
- else
- ERROR_EXIT "[FATAL]:-$GPSTART not located" 2
- fi
- LOG_MSG "[INFO]:-Completed restart of Greenplum instance in production mode" 1
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-SET_GP_USER_PW () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- $PSQL -p $MASTER_PORT -d "$DEFAULTDB" -c"alter user $USER_NAME password '$GP_PASSWD';" >> $LOG_FILE 2>&1
- ERROR_CHK $? "update Greenplum superuser password" 1
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CREATE_DATABASE () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- SET_VAR $QD_PRIMARY_ARRAY
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"create database \"${DATABASE_NAME}\" tablespace \"$GP_TABLESPACE_NAME\";" >> $LOG_FILE 2>&1
- ERROR_CHK $? "create database $DATABASE_NAME" 2
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CREATE_DFS_TABLESPACE () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- SET_VAR $QD_PRIMARY_ARRAY
-
- LOG_MSG "[INFO]:-Prepare to create filespace on $DFS_NAME" 1
-
- # create filespace statement
- LOG_MSG "[INFO]:-Create filespace $GP_FILESPACE_NAME" 1
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"create filespace \"$GP_FILESPACE_NAME\" on \"${DFS_NAME}\" ('${DFS_URL}/${SEG_PREFIX}');" >> $LOG_FILE 2>&1
- CHECK_CREATE_DFS_ERROR
-
- # create tablespace statement
- LOG_MSG "[INFO]:-Create tablespace $GP_TABLESPACE_NAME" 1
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"create tablespace \"$GP_TABLESPACE_NAME\" filespace \"$GP_FILESPACE_NAME\";" >> $LOG_FILE 2>&1
- CHECK_CREATE_DFS_ERROR
-
- # update template0 tablespace
- LOG_MSG "[INFO]:-Update template0 tablespace" 1
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"SET allow_system_table_mods='dml';UPDATE pg_database SET dat2tablespace = (SELECT oid FROM pg_tablespace WHERE spcname = '$GP_TABLESPACE_NAME') WHERE datname = '$DEFAULTDB';" >> $LOG_FILE 2>&1
- CHECK_CREATE_DFS_ERROR
-
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CHECK_CREATE_DFS_ERROR () {
- if [ $? -ne 0 ];then
- # this text is duplicated below
- LOG_MSG "[WARN]:" 1
- LOG_MSG "[WARN]:-Failed to create dfs filespace; review gpinitsystem output to" 1
- LOG_MSG "[WARN]:- determine why this step failed and reinitialize cluster after resolving" 1
- LOG_MSG "[WARN]:- issues. Not all initialization tasks have completed so the cluster" 1
- LOG_MSG "[WARN]:- should not be used." 1
-
- LOG_MSG "[WARN]:-gpinitsystem will now try to stop the cluster" 1
- LOG_MSG "[WARN]:" 1
-
- if [ -f $GPSTOP ]; then
- GPSTOP_OPTS=$(OUTPUT_LEVEL_OPTS)
- export MASTER_DATA_DIRECTORY=${MASTER_DIRECTORY}/${SEG_PREFIX}-1
- $GPSTOP -a -i -d $MASTER_DATA_DIRECTORY $GPSTOP_OPTS
-
- RETVAL=$?
- case $RETVAL in
- 0 ) LOG_MSG "[INFO]:-Successfully shutdown the Greenplum instance" 1;;
- 1 ) LOG_MSG "[WARN]:-Non fatal error from Greenplum instance shutdown" 1;;
- * ) ERROR_EXIT "[WARN]:-Failed to stop new Greenplum instance" 2
- esac
- else
- LOG_MSG "[WARN]:-$GPSTOP not located" 1
- fi
-
- # this text is duplicated above
- LOG_MSG "[WARN]:" 1
- LOG_MSG "[WARN]:-Failed to create dfs filespace; review gpinitsystem output to" 1
- LOG_MSG "[WARN]:- determine why this step failed and reinitialize cluster after resolving" 1
- LOG_MSG "[WARN]:- issues. Not all initialization tasks have completed so the cluster" 1
- LOG_MSG "[WARN]:- should not be used." 1
- LOG_MSG "[WARN]:" 1
- ERROR_EXIT "[FATAL]: create dfs filespace failed;" 2
- fi
-}
-
-CREATE_TEMPLATE1 () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- SET_VAR $QD_PRIMARY_ARRAY
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"create database \"template1\" tablespace \"$GP_TABLESPACE_NAME\" template template0;" >> $LOG_FILE 2>&1
- ERROR_CHK $? "create database $DATABASE_NAME" 2
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"SET allow_system_table_mods='dml';UPDATE pg_database SET datistemplate = 't' WHERE datname = 'template1';" >> $LOG_FILE 2>&1
- ERROR_CHK $? "update database $DATABASE_NAME" 2
- #$PSQL -p $GP_PORT -d "$DEFAULTDB" -c"REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;" >> $LOG_FILE 2>&1
- #ERROR_CHK $? "revoke privileges database $DATABASE_NAME" 2
- #$PSQL -p $GP_PORT -d "$DEFAULTDB" -c"REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;" >> $LOG_FILE 2>&1
- #ERROR_CHK $? "revoke privileges database $DATABASE_NAME" 2
- #$PSQL -p $GP_PORT -d "$DEFAULTDB" -c"VACUUM FULL pg_database;" >> $LOG_FILE 2>&1
- #ERROR_CHK $? "vacuum database $DATABASE_NAME" 2
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-CREATE_POSTGRES () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- SET_VAR $QD_PRIMARY_ARRAY
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"create database \"postgres\" tablespace \"$GP_TABLESPACE_NAME\";" >> $LOG_FILE 2>&1
- ERROR_CHK $? "create database $DATABASE_NAME" 2
- $PSQL -p $GP_PORT -d "$DEFAULTDB" -c"SET allow_system_table_mods='dml';UPDATE pg_database SET datistemplate = 't' WHERE datname = 'postgres';" >> $LOG_FILE 2>&1
- ERROR_CHK $? "update database $DATABASE_NAME" 2
- #$PSQL -p $GP_PORT -d "$DEFAULTDB" -c"VACUUM FULL pg_database;" >> $LOG_FILE 2>&1
- #ERROR_CHK $? "vacuum database $DATABASE_NAME" 2
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-
-SCAN_LOG () {
- LOG_MSG "[INFO]:-Start Function $FUNCNAME"
- LOG_MSG "[INFO]:-Scanning utility log file for any warning messages" 1
- SCAN_STRING="\[WARN\]|invalid|warning:|fatal|error"
- if [ `$EGREP -i "$SCAN_STRING" $LOG_FILE|$GREP -v "\[INFO\]"|$WC -l` -ne 0 ];then
- LOG_MSG "[WARN]:-*******************************************************" 1
- LOG_MSG "[WARN]:-Scan of log file indicates that some warnings or errors" 1
- LOG_MSG "[WARN]:-were generated during the array creation" 1
- LOG_MSG "[INFO]:-Please review contents of log file" 1
- LOG_MSG "[INFO]:-$LOG_FILE" 1
- LOG_MSG "[INFO]:-To determine level of criticality" 1
- if [ `$GREP -ci "\[INFO]:-Start Main" $LOG_FILE` -gt 1 ];then
- LOG_MSG "[INFO]:-These messages could be from a previous run of the utility" 1
- LOG_MSG "[INFO]:-that was called today!" 1
- fi
- LOG_MSG "[WARN]:-*******************************************************" 1
- EXIT_STATUS=1
- else
- LOG_MSG "[INFO]:-Log file scan check passed" 1
- fi
- LOG_MSG "[INFO]:-End Function $FUNCNAME"
-}
-
-DUMP_OUTPUT_CONFIG () {
- $ECHO "ARRAY_NAME=\"$ARRAY_NAME\"" > $OUTPUT_CONFIG
- if [ x"" != x"$IP_ALLOW" ] ; then
- $ECHO "IP_ALLOW=$IP_ALLOW" >> $OUTPUT_CONFIG
- fi
- if [ x"" != x"$TRUSTED_SHELL" ] ; then
- $ECHO "TRUSTED_SHELL=$TRUSTED_SHELL" >> $OUTPUT_CONFIG
- fi
- if [ x"" != x"$CHECK_POINT_SEGMENTS" ] ; then
- $ECHO "CHECK_POINT_SEGMENTS=$CHECK_POINT_SEGMENTS" >> $OUTPUT_CONFIG
- fi
- if [ x"" != x"$KERBEROS_KEYFILE" ] ; then
- $ECHO "KERBEROS_KEYFILE=$KERBEROS_KEYFILE" >> $OUTPUT_CONFIG
- fi
- if [ x"" != x"$ENABLE_SECURE_FILESYSTEM" ] ; then
- $ECHO "ENABLE_SECURE_FILESYSTEM=$ENABLE_SECURE_FILESYSTEM" >> $OUTPUT_CONFIG
- fi
- if [ x"" != x"$ENCODING" ] ; then
- $ECHO "ENCODING=$ENCODING" >> $OUTPUT_CONFIG
- fi
-
- $ECHO "SEG_PREFIX=$SEG_PREFIX" >> $OUTPUT_CONFIG
-
- $ECHO "QD_PRIMARY_ARRAY=$QD_PRIMARY_ARRAY" >> $OUTPUT_CONFIG
- $ECHO "declare -a PRIMARY_ARRAY=(" >> $OUTPUT_CONFIG
- for qe in ${QE_PRIMARY_ARRAY[@]}
- do
- $ECHO "$qe" >> $OUTPUT_CONFIG
- done
- $ECHO ")" >> $OUTPUT_CONFIG
- if [ $MIRRORING -ne 0 ] ; then
- $ECHO "declare -a MIRROR_ARRAY=(" >> $OUTPUT_CONFIG
- for qe in ${QE_MIRROR_ARRAY[@]}
- do
- $ECHO "$qe" >> $OUTPUT_CONFIG
- done
- $ECHO ")" >> $OUTPUT_CONFIG
- fi
-
-
-}
-
-READ_INPUT_CONFIG () {
- # Check that we have a non-zero configuration file
- CHK_FILE $INPUT_CONFIG
- if [ $EXISTS -ne 0 ]; then
- ERROR_EXIT "[FATAL]:-Problem with $INPUT_CONFIG file" 2
- fi
-
- # Make sure old CLUSTER_CONFIG settings are not hanging around.
- unset PORT_BASE SEG_PREFIX DATA_DIRECTORY
-
- # Validation
- if [ x"" != x"${MASTER_PORT}" ] ; then
- ERROR_EXIT "[FATAL]:-Problem with configuration. Cannot specify MASTER_PORT and QD_PRIMARY_ARRAY" 2
- fi
-
- if [ x"" != x"${MASTER_HOSTNAME}" ] ; then
- ERROR_EXIT "[FATAL]:-Problem with configuration file. Cannot specify MASTER_HOSTNAME and QD_PRIMARY_ARRAY" 2
- fi
-
- if [ x"" != x"${MASTER_DIRECTORY}" ] ; then
- ERROR_EXIT "[FATAL]:-Problem with configuration file. Cannot specify MASTER_DIRECTORY and QD_PRIMARY_ARRAY" 2
- fi
-
- # Make sure it is not a dos file with CTRL M at end of each line
- $CAT $INPUT_CONFIG|$SED -e 's/^M$//g' > $TMP_FILE
- $MV $TMP_FILE $INPUT_CONFIG
- LOG_MSG "[INFO]:-Dumping $INPUT_CONFIG to logfile for reference"
- $CAT $INPUT_CONFIG|$GREP -v "^#" >> $LOG_FILE
- LOG_MSG "[INFO]:-Completed $INPUT_CONFIG dump to logfile"
- # Source the cluster configuration file
- LOG_MSG "[INFO]:-Reading Greenplum configuration file $INPUT_CONFIG"
-
- . $INPUT_CONFIG
- SET_VAR $QD_PRIMARY_ARRAY
- MASTER_HOSTNAME=$GP_HOSTADDRESS
- MASTER_DIRECTORY=`$DIRNAME $GP_DIR`
- MASTER_PORT=$GP_PORT
- SEG_PREFIX=`$BASENAME $GP_DIR -1`
-
- MACHINE_LIST=()
- DATA_DIRECTORY=()
- CONTENT_COUNT=0
- for QE in ${PRIMARY_ARRAY[@]}
- do
- SET_VAR $QE
-
- DATA_DIRECTORY=(${DATA_DIRECTORY[@]} `$DIRNAME $GP_DIR`)
- MACHINE_LIST=(${MACHINE_LIST[@]} $GP_HOSTADDRESS)
-
- ((CONTENT_COUNT=$CONTENT_COUNT+1))
- done
-
- DATA_DIRECTORY=(`$ECHO ${DATA_DIRECTORY[@]} | $TR ' ' '\n' | $SORT | $TR '\n' ' '`)
-
- # HAWQ does not support mirroring.
- if [ x"" != x"${MIRROR_ARRAY}" ] ; then
- LOG_MSG "[WARN]:-HAWQ finds MIRROR_ARRAY variable in config file: $INPUT_CONFIG, mirroring config is not needed and is omitted"
- fi
- MIRROR_ARRAY=
-
- if [ x"" != x"${MIRROR_ARRAY}" ] ; then
- MIRRORING=0
- MIRROR_DATA_DIRECTORY=()
- for QE in ${MIRROR_ARRAY[@]}
- do
- SET_VAR $QE
-
- MIRROR_DATA_DIRECTORY=(${MIRROR_DATA_DIRECTORY[@]} `$DIRNAME $GP_DIR`)
- MACHINE_LIST=(${MACHINE_LIST[@]} $GP_HOSTADDRESS)
- done
-
- MIRROR_DATA_DIRECTORY=(`$ECHO ${MIRROR_DATA_DIRECTORY[@]} | $TR ' ' '\n' | $SORT | $TR '\n' ' '`)
- fi
-
- MACHINE_LIST=(`$ECHO ${MACHINE_LIST[@]} | $TR ' ' '\n' | $SORT -u | $TR '\n' ' '`)
- if [ `$ECHO ${MACHINE_LIST[@]}| $TR ' ' '\n' | $GREP -c "${MASTER_HOSTNAME}\$"` -gt 0 ]; then
- MACHINE_LIST=($MASTER_HOSTNAME `$ECHO ${MACHINE_LIST[@]} | $TR ' ' '\n' | $GREP -v "${MASTER_HOSTNAME}\$" | $TR '\n' ' '`)
- fi
-
- QE_PRIMARY_ARRAY=(${PRIMARY_ARRAY[@]})
- QE_MIRROR_ARRAY=(${MIRROR_ARRAY[@]})
- ((TOTAL_SEG=${#QE_PRIMARY_ARRAY[@]}))
- ((TOTAL_MIRRORS=${#MIRROR_ARRAY[@]}))
- MIRROR_TYPE=0
- MULTI_HOME=0
-
- if [ $TOTAL_MIRRORS -ne 0 ] ; then
- if [ $TOTAL_SEG -ne $TOTAL_MIRRORS ] ; then
- ERROR_EXIT "[FATAL]:-Problem with configuration file. Cannot specify different number of primary and mirror segments." 2
- fi
- fi
-}
-
-
-CHK_QE_ARRAY_PORT_RANGES () {
-
- #
- # calculate port ranges
- #
-
- MIN_PORT=1000000
- MIN_REPLICATION_PORT=100000000
- MAX_PORT=0
- MAX_REPLICATION_PORT=0
- for QE in ${QE_PRIMARY_ARRAY[@]}
- do
- SET_VAR $QE
- if [ $GP_PORT -lt $MIN_PORT ] ; then
- MIN_PORT=$GP_PORT
- fi
- if [ $GP_PORT -gt $MAX_PORT ] ; then
- MAX_PORT=$GP_PORT
- fi
- if [ $GP_REPLICATION_PORT -lt $MIN_REPLICATION_PORT ]; then
- MIN_REPLICATION_PORT=$GP_REPLICATION_PORT
- fi
- if [ $GP_REPLICATION_PORT -gt $MAX_REPLICATION_PORT ]; then
- MAX_REPLICATION_PORT=$GP_REPLICATION_PORT
- fi
- done
-
- PORT_BASE=$MIN_PORT
- REPLICATION_PORT_BASE=$MIN_REPLICATION_PORT
-
- if [ x"" != x"${QE_MIRROR_ARRAY}" ] ; then
- MIN_MIRROR_PORT=100000000
- MIN_MIRROR_REPLICATION_PORT=100000000
- MAX_MIRROR_PORT=0
- MAX_MIRROR_REPLICATION_PORT=0
- for QE in ${QE_MIRROR_ARRAY[@]}
- do
- SET_VAR $QE
- if [ $GP_PORT -lt $MIN_MIRROR_PORT ] ; then
- MIN_MIRROR_PORT=$GP_PORT
- fi
- if [ $GP_PORT -gt $MAX_MIRROR_PORT ] ; then
- MAX_MIRROR_PORT=$GP_PORT
- fi
- if [ $GP_REPLICATION_PORT -lt $MIN_MIRROR_REPLICATION_PORT ]; then
- MIN_MIRROR_REPLICATION_PORT=$GP_REPLICATION_PORT
- fi
- if [ $GP_REPLICATION_PORT -gt $MAX_MIRROR_REPLICATION_PORT ]; then
- MAX_MIRROR_REPLICATION_PORT=$GP_REPLICATION_PORT
- fi
- done
- fi
-
- MIRROR_PORT_BASE=$MIN_MIRROR_PORT
- MIRROR_REPLICATION_PORT_BASE=$MIN_MIRROR_REPLICATION_PORT
-
- #
- # now look for range conflicts
- #
-
- if CHK_OVERLAP $MASTER_PORT $MASTER_PORT $MIN_PORT $MAX_PORT; then
- ERROR_EXIT "[FATAL]:-MASTER_PORT overlaps with PORT_BASE." 2
- fi
-
- # Mirror configuration
- if [ $MIRROR_PORT_BASE ]; then
-
- if [ x"" = x"$REPLICATION_PORT_BASE" ]; then
- ERROR_EXIT "[FATAL]:-REPLICATION_PORT_BASE variable not set" 2
- fi
-
- if [ x"" = x"$MIRROR_REPLICATION_PORT_BASE" ]; then
- ERROR_EXIT "[FATAL]:-MIRROR_REPLICATION_PORT_BASE variable not set" 2
- fi
-
- if CHK_OVERLAP $MASTER_PORT $MASTER_PORT $MIN_MIRROR_PORT $MAX_MIRROR_PORT; then
- ERROR_EXIT "[FATAL]:-MASTER_PORT overlaps with MIRROR_PORT_BASE." 2
- fi
-
- if CHK_OVERLAP $MASTER_PORT $MASTER_PORT $MIN_REPLICATION_PORT $MAX_REPLICATION_PORT; then
- ERROR_EXIT "[FATAL]:-MASTER_PORT overlaps with REPLICATION_PORT_BASE." 2
- fi
-
- if CHK_OVERLAP $MASTER_PORT $MASTER_PORT $MIN_MIRROR_REPLICATION_PORT $MAX_MIRROR_REPLICATION_PORT; then
- ERROR_EXIT "[FATAL]:-MASTER_PORT overlaps with MIRROR_REPLICATION_PORT_BASE." 2
- fi
-
- if CHK_OVERLAP $MIN_PORT $MAX_PORT $MIN_MIRROR_PORT $MAX_MIRROR_PORT; then
- ERROR_EXIT "[FATAL]:-PORT_BASE AND MIRROR_PORT_BASE define overlapping ranges." 2
- fi
-
- if CHK_OVERLAP $MIN_PORT $MAX_PORT $MIN_REPLICATION_PORT $MAX_REPLICATION_PORT; then
- ERROR_EXIT "[FATAL]:-PORT_BASE AND REPLICATION_PORT_BASE define overlapping ranges." 2
- fi
-
- if CHK_OVERLAP $MIN_PORT $MAX_PORT $MIN_MIRROR_REPLICATION_PORT $MAX_MIRROR_REPLICATION_PORT; then
- ERROR_EXIT "[FATAL]:-PORT_BASE AND MIRROR_REPLICATION_PORT_BASE define overlapping ranges." 2
- fi
-
- if CHK_OVERLAP $MIN_MIRROR_PORT $MAX_MIRROR_PORT $MIN_REPLICATION_PORT $MAX_REPLICATION_PORT; then
- ERROR_EXIT "[FATAL]:-MIRROR_PORT_BASE AND REPLICATION_PORT_BASE define overlapping ranges." 2
- fi
-
- if CHK_OVERLAP $MIN_MIRROR_PORT $MAX_MIRROR_PORT $MIN_MIRROR_REPLICATION_PORT $MAX_MIRROR_REPLICATION_PORT; then
- ERROR_EXIT "[FATAL]:-MIRROR_PORT_BASE AND MIRROR_REPLICATION_PORT_BASE define overlapping ranges." 2
- fi
-
- if CHK_OVERLAP $MIN_REPLICATION_PORT $MAX_REPLICATION_PORT $MIN_MIRROR_REPLICATION_PORT $MAX_MIRROR_REPLICATION_PORT; then
- ERROR_EXIT "[FATAL]:-REPLICATION_PORT_BASE AND MIRROR_REPLICATION_PORT_BASE define overlapping ranges." 2
- fi
- fi
-
- # PORT_BASE
- if [ x"" = x"$PORT_BASE" ]; then
- ERROR_EXIT "[FATAL]:-PORT_BASE variable not set" 2
- fi
- QE_PORT=$PORT_BASE
-}
-
-
-
-LOAD_GP_TOOLKIT () {
- LOG_MSG "[INFO]:-Loading hawq_toolkit..." 1
- ROLNAME=`$PSQL -q -t -A -c "select rolname from pg_authid where oid=10" template1`
- if [ x"$ROLNAME" == x"" ];then
- ERROR_EXIT "[FATAL]:-Failed to retrieve rolname." 2
- fi
-
- BACKOUT_COMMAND "$RM -f /tmp/_gp_toolkit_tmp_${CUR_DATE}_$FILE_TIME"
-
- # We need SET SESSION AUTH here to load the toolkit
- $ECHO "SET SESSION AUTHORIZATION $ROLNAME;" >> /tmp/_gp_toolkit_tmp_${CUR_DATE}_$FILE_TIME 2>&1
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Failed to create the hawq_toolkit sql file." 2
- fi
-
- $CAT $GPHOME/share/postgresql/gp_toolkit.sql >> /tmp/_gp_toolkit_tmp_${CUR_DATE}_$FILE_TIME 2>&1
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Failed to create the hawq_toolkit sql file." 2
- fi
-
- $PSQL -q -f /tmp/_gp_toolkit_tmp_${CUR_DATE}_$FILE_TIME template1 >> $LOG_FILE 2>&1
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Failed to create the hawq_toolkit schema." 2
- fi
-
- $PSQL -q -f /tmp/_gp_toolkit_tmp_${CUR_DATE}_$FILE_TIME postgres >> $LOG_FILE 2>&1
- RETVAL=$?
- if [ $RETVAL -ne 0 ];then
- ERROR_EXIT "[FATAL]:-Failed to create the hawq_toolkit schema." 2
- fi
-
- $RM /tmp/_gp_toolkit_tmp_${CUR_DATE}_$FILE_TIME
-}
-
-SET_DCA_CONFIG_SETTINGS () {
- LOG_MSG "[INFO]:-Setting DCA specific configuration values..." 1
-
- # GPSQL-171 user may set ENV PGDATABASE here. Unset PGDATABASE or gpconfig may encounter some problems.
- unset PGDATABASE
-
- LOG_MSG "[INFO]:-$GPCONFIG -c $DCA_RESQUEUE_PRIORITY_NAME -v $DCA_SEGMENT_RESQUEUE_PRIORITY_VAL -m $DCA_MASTER_RESQUEUE_PRIORITY_VAL" 1
- $GPCONFIG -c $DCA_RESQUEUE_PRIORITY_NAME -v $DCA_SEGMENT_RESQUEUE_PRIORITY_VAL -m $DCA_MASTER_RESQUEUE_PRIORITY_VAL
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- LOG_MSG "[WARN]:-Failed to set value for $DCA_RESQUEUE_PRIORITY_NAME" 1
- fi
-
- LOG_MSG "[INFO]:-$GPCONFIG -c $DCA_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_NAME -v $DCA_SEGMENT_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_VAL -m $DCA_MASTER_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_VAL" 1
- $GPCONFIG -c $DCA_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_NAME -v $DCA_SEGMENT_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_VAL -m $DCA_MASTER_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_VAL
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- LOG_MSG "[WARN]:-Failed to set value for $DCA_RESQUEUE_PRIORITY_CPUCORES_PER_SEGMENT_NAME" 1
- fi
-
- LOG_MSG "[INFO]:-$GPCONFIG -c $DCA_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_NAME -v $DCA_SEGMENT_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_VAL -m $DCA_MASTER_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_VAL" 1
- $GPCONFIG -c $DCA_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_NAME -v $DCA_SEGMENT_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_VAL -m $DCA_MASTER_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_VAL
- RETVAL=$?
- if [ $RETVAL -ne 0 ]; then
- LOG_MSG "[WARN]:-Failed to set value for $DCA_RESQUEUE_PRIORITY_SWEEPER_INTERVAL_NAME" 1
- fi
-}
-
-#******************************************************************************
-# Main Section
-#******************************************************************************
-trap 'ERROR_EXIT "[FATAL]:-Received INT or TERM signal" 2' INT
<TRUNCATED>