Posted to commits@hawq.apache.org by zt...@apache.org on 2021/07/30 03:48:00 UTC

[hawq] branch taoz updated (c0adf9f -> a89e10a)

This is an automated email from the ASF dual-hosted git repository.

ztao1987 pushed a change to branch taoz
in repository https://gitbox.apache.org/repos/asf/hawq.git.


 discard c0adf9f  HAWQ-1760. datalocality information should be showed by default
 discard 2c7fe33  avoid pxf distclean when doing 'make distclean'
    omit cbd649e  HAWQ-1758. Add debug info for udf
    omit 9012518  fix proc wait queue disorder in deadlock scenario
    omit e16eaf4  fix deadlock error info
    omit 2a050a1  Fix bug that deadlock cannot be resolved
    omit 8afca8b  Enable LOCK TABLE command and add case for it
    omit 9c7ca3c  HAWQ-1730. Fix missing data when inserting into external table select from internal table using gpfdist
    omit 78b1062  Fix bug in refineCachedPlan and SPI_cursor_open
    omit aa13949  Fix link for tomcat v7 download
    omit 1d755ec  enable cleaning up idle cached executors when guc gp_vmem_idle_resource_timeout specificed timer expires
    omit 33b5f13  HAWQ-1722. Fix core dump due to lock is not released before reporting errors when exceeding MaxAORelSegFileStatus
    omit 1bf9530  refresh gpfdist server/client code
    omit bd6196f  HAWQ-1647. Update HAWQ version to 2.4.0.0
     add 67b64bc  HAWQ-1645. Remove autogeneration of version number from pxf/build.gradle
     add d85cb5d  Revert "HAWQ-1645. Remove autogeneration of version number from pxf/build.gradle"
     add 6cd1cf3  HAWQ-1646. Fixes travis CI issues
     add 7ccbdc0  HAWQ-1645. Use the latest available version of gradle (4.9)
     add 80142f4  HAWQ-1649. Add parallel true and daemon to gradle properties
     add 4f12950  HAWQ-1644. Make delegation token optional in PXF
     add 9f33d8d  HAWQ-1650. Fix compilation issue in Java 7
     add 48ff52c  HAWQ-1628. Add hdfs protocol for pluggable storage framework
     add e6b7021  HAWQ-1628. Fix a bug of cassert mode
     add 5d6afbe  HAWQ-1652. Update PXF keytab path to use PXF_HOME
     add 0a271bc  HAWQ-1655. Fix doc and makefile issue for hawq-docker
     add da823d7  HAWQ-1651. Fix some bugs in bulid and unittest.
     add 6f3337c  HAWQ-1656. Fix build issues of Bison dependency.
     add ae2af74  HAWQ-1629. Add ORC format using pluggable storage framework.
     add a12ed20  Fix implicit AND expression addition
     add 24e36c9  Fix handling of implicit AND expressions
     add a741655  HAWQ-1647. Update HAWQ version to 2.4.0.0
     add 472fa2b  HAWQ-1605. Support INSERT in PXF JDBC plugin
     add c6c7988  HAWQ-1617. Fix function calls from unit tests
     add 75977f8  HAWQ-1658. Easy Hawq and PXF initialization with docker.
     add 37c7a22  HAWQ-1671. Add compile support for orc formatter and wrapper 	Changed Makefile in contrib/orc 	Add folder format and cwrapper for split compilation 	Add dummy code for testing the Makefile
     add 78c8431  HAWQ-1674. Add support for compiling CPP projects in orc_format. 	add cwrapper.cpp to generate interface code for C/C++ link. 	changed Makefile to support cpp compiling.
     add 4978898  Update PXF README to include profiling info
     add b56084d  HAWQ-1677. Disable orc compilation until it is available
     add 7cd9f06  HAWQ-1678. Add missing header file for libyarn client
     add 86fbd2e  HAWQ-1679. Fix typo of orc in contrib
     add 7a1358b  HAWQ-1679. Remove unnecessary error in hdfsprotocol_validate function
     add 7936ceb  HAWQ-1679. Fix travis failure due to missing apache license information
     add 7770727  HAWQ-1681. Support manage user in cloud
     add 621b775  HAWQ-1682. Remove useless log for cloud authentication
     add c72a3f5  HAWQ-1683. Using DEBUG3 instead INFO log in cloudrest.c
     add 188f6a1  HAWQ-1684. Add connection close in header for cloud rest
     add 6096055  HAWQ-1685. Fix bug in CheckUserExistOnCloud
     add 63d8935  HAWQ-1688. Fix apache license header for cloud support
     add aea301f  HAWQ-1687. Fix travis broken due to missing apr-1-config
     add 304d8d5  update hawq help
     add e696bbd  HAWQ-1689. Add test for cloud environment support
     add 39d8eb5  HAWQ-1690. Add common utility used by ORC foramt
     add 9701636  HAWQ-1691. Add universal plan used by ORC format
     add 46cd31b  HAWQ-1692. Add the ORC format implementation
     add c8d6571  apache hawq make dbcommon univplan storage
     add bc2edae  HAWQ-1694. enable dbcommon,univplan,storage ut
     add f4f9794  HAWQ-1694. fix dbcommon,univplan,storage ut couldn't run issue
     add 7f4f99a  HAWQ-1697. fix bug of window func unsafe push down
     add 57d53ac  HAWQ-1699. modify json's path in hawq build
     add ede551e  HAWQ-1698. fix univplan cannot be build in linux bug
     add 0f6ed94  HAWQ-1700. fix hawq cannot be build in linux bug
     add 35cabab  HAWQ-1701. enable feature test in apache hawq
     add 6bd5b2c  HAWQ-1702. modify .ans file in hawq feature-test
     add ad23e03  HAWQ-1703. delete legacy orc in hawq
     add d9a45b4  HAWQ-1704. Add ORC protocol validators in hawq
     add 6ac3bd2  HAWQ-1705. fix orc build bug
     add 4ad84ff  HAWQ-1706. enable orc build in makefile
     add e0a5d7b  HAWQ-1708. Add ORC writer in hawq
     add edb727e  HAWQ-1707. install dbcommon,univplan,storage once they are made
     add 437047a  HAWQ-1709. Add ORC reader interface in hawq
     add b5fdb47  HAWQ-1695. Optimize hawq compilation structure
     add f62d3f4  HAWQ-1710. Add ORC reader implement in hawq
     add 4809a1c  HAWQ-1715. bump hawq version to 2.5 in contrib/hawq-ambari-plugin/build.properties for Apache HAWQ 2.5.0
     add 9247b8c  HAWQ-1714. bump hawq version to 2.5 in pom.xml for Apache HAWQ 2.5.0
     add 74b24a3  HAWQ-1712. enable apache hawq build in docker
     add cb059df  HAWQ-1716. enable feature-test in docker
     add 39cfe43  HAWQ-1717. Fix incorrect seeding for random function
     add fa3d1b0  HAWQ-1719. Reopen unittest in hawq
     add 296aeb6  HAWQ-1718. Disable agg function unittest in dbcommon
     add c7e03d4  HAWQ-1713. Disable TestMbConverter.Canonical in hawq
     add 79869d6  HAWQ-1711. Add feature test for ORC format
     add b935042  HAWQ-1720. support create table in orc format
     add 8404363  HAWQ-1722. Fix core dump due to lock is not released before reporting errors when exceeding MaxAORelSegFileStatus
     add 861affd  HAWQ-1724. Fix download link for tomcat 7
     add 0d8e82e  HAWQ-1725 install cogapp in Dockerfile
     add 5c027fe  HAWQ-1726. install some necessary software in Dockerfile
     add 6630c54  HAWQ-1729.fix memory wrong usage
     add af1f432  HAWQ-1728. fix dependency path hard code
     add a0990cc  HAWQ-1731. fix orc table create information bug
     add 4732b4e  HAWQ-1732. Make writable table can read
     add 30afd65  HAWQ-1733.Resolve select issue in external table of orc
     add 33ddcf7  HAWQ-1734. Resolve insert issue in external table of orc
     add d95647a  HAWQ-1735. enable orc feature test
     add 7b2376b  HAWQ-1737. set with_orc's default value as yes to build orc automatically
     add 7bf7113  HAWQ-1739. remove old version protobuf in Dockerfile
     add 45434cd  HAWQ-1736.Fix bug timestamp orc select
     add 5ec4383  HAWQ-1740. fix build bug when build in ci
     add 9dab748  HAWQ-1742. include *.h in dbcommon in src's makefile
     add 9090185  HAWQ-1743. Fix download link for tomcat 7
     add 3ab78a8  HAWQ-1744. fix lib jvm not found in docker bug
     add e19ffa3  HAWQ-1745. include *.h in storage in contrib's makefile
     add f630d30  HAWQ-1746. fix make install bug when building hawq rpm
     add d155b5b  HAWQ-1747. Change pip downloading
     add e16c76e  HAWQ-1748. Idle executors keep exist instead of quit even when gp_vmem_idle_resource_timeout specified timer expires
     add c1fd0b5  HAWQ-1749. Add some license and change pom.xml
     add 4156a87  HAWQ-1750. Fix link of apr in travis
     add 154d290  HAWQ-1751. Fix Travis CI build
     add 6dcfe6e  HAWQ-1752. Remove tracked Makefile.global
     add eb9133e  HAWQ-1752. Change .travis
     add c4da083  HAWQ-1753. location number of external table should less than bucketnum of target table
     add da2623e  HAWQ-1754. add comment for max segment number count for random table.
     add f2e0482  HAWQ-2742. add comment for enforce virtual segment number.
     add 202bc0b  HAWQ-1756. add comment for struct TargetSegmentIDMap.
     add 5700463  HAWQ-1758. Add debug info for udf
     add 6c72703  HAWQ-1760. datalocality information should be showed by default
     add 67be67a  HAWQ-1764. fix udf debug core and improve timing accuracy
     add 7dee8e5  HAWQ-1765. fix compile issue
     add bc7108d  HAWQ-1766. fix get hdfs file blocklocations
     add 00c1268  HAWQ-1767. Add TypeKind for aggregate function intermediate output
     add 395d823   HAWQ-1767. fix wrong compile for enum type
     add cd88f8c  HAWQ-1767. do not install in compile all command
     add d7bd21b  HAWQ-1767. Fix ORC DECIMAL writer
     add e9718a0  HAWQ-1767. enhance orc writer and reader            clean magma type
     add ee41386  HAWQ-1767. truncate zero to no digits for old executor
     add dd2d3ad  HAWQ-1767. enable new insert in univplan
     add dc95081  HAWQ-1767. refactor decimalToFloat and floatToDecimal function
     add b3f60b3  HAWQ-1769. Fix DirectoryIterator of libhdfs3
     add 7429ece  HAWQ-1767. Use list instead of deque for waiters in LockState
     add 05b54e1  HAWQ-1767. Adjust getMemUsed() to exact value
     add 1844586  HAWQ-1767. Refactor FlatMemBuf memory allocate strategy
     add 0959053  HAWQ-1770. fix memUse bug when reserve new block
     add 86c7efb  HAWQ-1770. Fix bug of lock
     add 6b045e7  HAWQ-1771. Add newQE DOUBLE_TO_TIMESTAMP function
     add b7f085d  HAWQ-1771. add TRANSLATE function and set KMP_LIMIT = 30
     add daf1b87  HAWQ-1775. refacte bpchar implementation and test
     add 3dddfff  HAWQ-1774. fix coredump when insert nan to decimal
     add 449f046  HAWQ-1775. set scalar initial value
     add 7c26e79  HAWQ-1774. refactor decimalToFloat and floatToDecimal function
     add feba0b1  HAWQ-1775. Add group agg
     add f53c738  HAWQ-1775. Regulate FlatMemBuf::getMemUsed
     add dec8fbf  HAWQ-1775. Add Vector/TupleBatch/ExprContext empty()
     add 2d4af05  HAWQ-1775. Refact tuple batch
     add 4958d4d  HAWQ-1772. add bool_text function
     add 5843970  HAWQ-1774. fix potential issue in DecimalVector::append
     add f9f3cc3  HAWQ-1772. Add TEXT_TO_DECIMAL && TO_NUMBER function
     add d242113  HAWQ-1772. Add INTERVAL_TO_TEXT function
     add ea93d12  HAWQ-1772. Add new functions
     add dce1e1f  HAWQ-1772. Add BOOL_TO_BYTEA function
     add 02c093e  HAWQ-1772. ADD time_text function
     add 70dfb85  HAWQ-1772. ADD NUMERIC[]
     add 82faba0  HAWQ-1772. ADD timestamptz_text
     add 6712e27  HAWQ-1773. Add type check for orc file
     add 6cb439a  HAWQ-1775. update some univplan index function to new name
     add 6f40de6  HAWQ-1773. fix orc type check for null orc file
     add 29c70e6  HAWQ-1772. fix typecast match issues
     add a8c0940  HAWQ-1779. Add GitHub Action for building on macOS
     add 4cbd805  HAWQ-1780. Add GitHub Action Step to Test against Running Instance
     add de10f23  HAWQ-1781. Add Github Action Sanity Test
     add aa1919b  HAWQ-1782. Fix failed to read EXTERNAL TABLE of GPFDIST protocol
     add 117bbfe  HAWQ-1784. Fix TestCreateTable depends on GUC setting
     add 2a80d79  HAWQ-1785. Fix HDFS metadata mismatch in GitHub Action
     add 89ed32f  HAWQ-1783. Add GitHub Action Workflow for Build on Linux
     add c5d2edb  HAWQ-1787. Refactor notice in pre-built toolchain
     add 5ca27b0  HAWQ-1787. Fix travis-CI
     add 74ce659  HAWQ-1788. set default value for dfs.domain.socket.path
     add 10cff83  HAWQ-1789. Make GitHub Workflow init script idempotent
     new a89e10a  HAWQ-1799. Init HAWQ 3.0.0.0 repo

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user force-pushes (git push --force) a change, leaving the
repository with a history like this:

 * -- * -- B -- O -- O -- O   (c0adf9f)
            \
             N -- N -- N   refs/heads/taoz (a89e10a)
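
For a local clone that tracks this branch, a minimal sketch of adopting the
rewritten history (assuming the remote is named origin and no local-only work
on taoz needs preserving):

    $ git fetch origin
    $ git checkout taoz
    $ git reset --hard origin/taoz   # drop the old local tip and move to a89e10a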

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.
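
From a local clone, a quick way to check which category a given revision falls
into, sketched here with commit cbd649e from the "omit" list above:

    $ git cat-file -t cbd649e            # prints "commit" while the object still exists locally
    $ git branch -a --contains cbd649e   # an "omit" revision is still reachable from some other ref;
    $ git tag --contains cbd649e         # a "discard" revision is reachable from none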

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .github/workflows/build.yml                        |   135 +
 .github/workflows/scripts/download/.gitignore      |     1 +
 .github/workflows/scripts/gtest_filter_negative    |    33 +
 .github/workflows/scripts/init_hawq.sh             |    73 +
 .github/workflows/scripts/init_hdfs.sh             |    70 +
 .github/workflows/scripts/init_linux.sh            |    65 +
 .github/workflows/scripts/init_macos.sh            |    53 +
 .github/workflows/scripts/toolchain.sh             |   106 +
 .travis.yml                                        |    58 +-
 CMakeLists.txt                                     |   164 +
 GNUmakefile.in                                     |    48 +-
 LICENSE                                            |     4 +-
 NOTICE                                             |     2 +-
 README.md                                          |   301 +-
 commit-msg                                         |    25 +
 config/tomcat.m4                                   |     2 +
 configure                                          |   396 +-
 configure.in                                       |    56 +-
 contrib/Makefile                                   |     8 +-
 contrib/extfmtcsv/Makefile                         |    15 +
 contrib/extfmtcsv/extfmtcsv.c                      |   676 +
 contrib/exthdfs/Makefile                           |    13 +
 contrib/exthdfs/common.h                           |    18 +
 contrib/exthdfs/exthdfs.c                          |   529 +
 contrib/exthive/Makefile                           |    13 +
 contrib/exthive/common.h                           |    18 +
 contrib/exthive/exthive.c                          |   493 +
 contrib/formatter_fixedwidth/fixedwidth.c          |     4 +-
 contrib/hawq-ambari-plugin/README.md               |     4 +-
 contrib/hawq-ambari-plugin/build.properties        |     4 +-
 contrib/hawq-ambari-plugin/pom.xml                 |     6 +-
 .../src/main/resources/utils/add-hawq.py           |     4 +-
 contrib/hawq-docker/README.md                      |     6 +-
 .../centos7-docker/hawq-test/service-hawq.sh       |   105 +
 .../centos7-docker/hawq-test/service-pxf.sh        |   120 +
 contrib/hawq-hadoop/Makefile                       |     4 -
 contrib/hawq-package/README                        |     6 +-
 contrib/hawq-package/build_hawq_rpm.sh             |     2 +-
 contrib/hawq-package/hawq.spec                     |    10 +-
 contrib/hawq-package/make_rpm_tarball.sh           |     6 +-
 contrib/magma/Makefile                             |    17 +
 contrib/magma/magma.c                              |  3885 +++
 contrib/magma/magma_install.sql                    |   217 +
 contrib/magma/monitor_install.sql                  |    77 +
 contrib/orc/Makefile                               |    15 +
 contrib/orc/README                                 |    84 +
 contrib/orc/hive_install.sql                       |    15 +
 contrib/orc/orc.c                                  |  2136 ++
 contrib/orc/orc_init.sql                           |    60 +
 contrib/orc/orc_install.sql                        |    85 +
 contrib/orc/orc_uninstall.sql                      |    36 +
 contrib/oushu/load_orc_debug_udf.sql               |   180 +
 contrib/oushu/orc_debug_metadata.py                |    10 +
 contrib/oushu/orc_debug_statistics.py              |    73 +
 contrib/pgcrypto/px-crypt.c                        |     2 +-
 coverage-report.sh                                 |   298 +
 depends/dbcommon/.gitignore                        |     9 +
 .../dbcommon/CMake/CMakeTestCompileInt64tType.cc   |    30 +
 depends/dbcommon/CMake/FindCogapp.cmake            |    54 +
 depends/dbcommon/CMake/FindGFlags.cmake            |    48 +
 depends/dbcommon/CMake/FindGlog.cmake              |    49 +
 depends/dbcommon/CMake/FindJSON.cmake              |    38 +
 depends/dbcommon/CMake/FindSnappy.cmake            |    30 +
 .../{libyarn => dbcommon}/CMake/Functions.cmake    |     0
 depends/dbcommon/CMake/Options.cmake               |    59 +
 depends/dbcommon/CMake/Platform.cmake              |    47 +
 depends/dbcommon/CMakeLists.txt                    |    31 +
 depends/dbcommon/Makefile                          |    79 +
 depends/dbcommon/Makefile.global.in                |    40 +
 depends/dbcommon/README                            |    14 +
 depends/dbcommon/bootstrap                         |   141 +
 depends/dbcommon/src/CMakeLists.txt                |   156 +
 .../src/dbcommon/checksum/checksum-util.cc         |    42 +
 .../dbcommon/src/dbcommon/checksum/checksum-util.h |    55 +
 depends/dbcommon/src/dbcommon/checksum/checksum.h  |    61 +
 .../dbcommon/src/dbcommon/checksum/hw-crc32c.cc    |   160 +
 depends/dbcommon/src/dbcommon/checksum/hw-crc32c.h |    69 +
 .../dbcommon/src/dbcommon/checksum/sw-crc32c.cc    |    94 +
 depends/dbcommon/src/dbcommon/checksum/sw-crc32c.h |    45 +
 .../src/dbcommon/common/node-deserializer.cc       |    33 +
 .../src/dbcommon/common/node-deserializer.h        |    75 +
 .../dbcommon/src/dbcommon/common/node-serializer.h |    51 +
 .../src/dbcommon/common/tuple-batch-store.cc       |   108 +
 .../src/dbcommon/common/tuple-batch-store.h        |    82 +
 .../dbcommon/src/dbcommon/common/tuple-batch.cc    |   903 +
 depends/dbcommon/src/dbcommon/common/tuple-batch.h |   335 +
 depends/dbcommon/src/dbcommon/common/tuple-desc.cc |   129 +
 depends/dbcommon/src/dbcommon/common/tuple-desc.h  |    72 +
 .../src/dbcommon/common/vector-transformer.h       |   311 +
 depends/dbcommon/src/dbcommon/common/vector.cc     |   268 +
 depends/dbcommon/src/dbcommon/common/vector.h      |   766 +
 .../src/dbcommon/common/vector/decimal-vector.cc   |   623 +
 .../src/dbcommon/common/vector/decimal-vector.h    |   215 +
 .../dbcommon/common/vector/fixed-length-vector.h   |   702 +
 .../src/dbcommon/common/vector/interval-vector.cc  |   553 +
 .../src/dbcommon/common/vector/interval-vector.h   |   166 +
 .../src/dbcommon/common/vector/list-vector.h       |   621 +
 .../src/dbcommon/common/vector/struct-vector.h     |   229 +
 .../src/dbcommon/common/vector/timestamp-vector.h  |   698 +
 .../common/vector/variable-length-vector.cc        |    83 +
 .../common/vector/variable-length-vector.h         |  1174 +
 .../src/dbcommon/filesystem/file-system-manager.cc |   112 +
 .../src/dbcommon/filesystem/file-system-manager.h  |    72 +
 .../dbcommon/src/dbcommon/filesystem/file-system.h |   226 +
 .../dbcommon/filesystem/hdfs/hdfs-file-system.cc   |   279 +
 .../dbcommon/filesystem/hdfs/hdfs-file-system.h    |   160 +
 .../src/dbcommon/filesystem/hive/fb303.thrift      |   113 +
 .../dbcommon/filesystem/hive/hive-file-system.cc   |    58 +
 .../dbcommon/filesystem/hive/hive-file-system.h    |    36 +
 .../dbcommon/filesystem/hive/hive_metastore.thrift |  1538 +
 .../dbcommon/filesystem/local/local-file-system.cc |   499 +
 .../dbcommon/filesystem/local/local-file-system.h  |   191 +
 depends/dbcommon/src/dbcommon/function/CPPLINT.cfg |     1 +
 depends/dbcommon/src/dbcommon/function/README      |     2 +
 depends/dbcommon/src/dbcommon/function/agg-func.cc |  2367 ++
 depends/dbcommon/src/dbcommon/function/agg-func.h  |   323 +
 .../src/dbcommon/function/arith-cmp-func.h         |   133 +
 .../dbcommon/src/dbcommon/function/arith-func.cc   |    86 +
 .../src/dbcommon/function/arithmetic-function.h    |   353 +
 .../src/dbcommon/function/array-function.cc        |  1021 +
 .../src/dbcommon/function/array-function.h         |    48 +
 .../src/dbcommon/function/binary-function.cc       |   299 +
 depends/dbcommon/src/dbcommon/function/cmp-func.cc |   537 +
 .../src/dbcommon/function/comparison-function.h    |   170 +
 .../src/dbcommon/function/date-function.cc         |  1292 +
 .../dbcommon/src/dbcommon/function/date-function.h |   104 +
 .../src/dbcommon/function/decimal-function.cc      |   952 +
 .../src/dbcommon/function/decimal-function.h       |   110 +
 depends/dbcommon/src/dbcommon/function/func-kind.h |   376 +
 depends/dbcommon/src/dbcommon/function/func.cc     |   605 +
 depends/dbcommon/src/dbcommon/function/func.h      |    91 +
 depends/dbcommon/src/dbcommon/function/function.h  |   719 +
 depends/dbcommon/src/dbcommon/function/invoker.cc  |    48 +
 depends/dbcommon/src/dbcommon/function/invoker.h   |    65 +
 .../src/dbcommon/function/mathematical-function.cc |   306 +
 .../src/dbcommon/function/mathematical-function.h  |   543 +
 .../src/dbcommon/function/string-binary-function.h |    68 +
 .../src/dbcommon/function/string-function.cc       |  1053 +
 .../src/dbcommon/function/typecast-func.cc         |   722 +
 .../dbcommon/src/dbcommon/function/typecast-func.h |    87 +
 .../src/dbcommon/function/typecast-function.cc     |   578 +
 .../src/dbcommon/function/typecast-function.h      |    45 +
 .../dbcommon/function/typecast-texttonum-func.cc   |  1027 +
 .../dbcommon/function/typecast-texttonum-func.h    |    53 +
 .../src/dbcommon/function/volatile-func.cc         |    56 +
 .../dbcommon/src/dbcommon/function/volatile-func.h |    30 +
 depends/dbcommon/src/dbcommon/hash/cdb-hash.h      |    95 +
 depends/dbcommon/src/dbcommon/hash/fast-hash.h     |   267 +
 depends/dbcommon/src/dbcommon/hash/hash-keys.cc    |    62 +
 depends/dbcommon/src/dbcommon/hash/hash-keys.h     |   357 +
 .../src/dbcommon/hash/native-hash-table.cc         |    97 +
 .../dbcommon/src/dbcommon/hash/native-hash-table.h |   472 +
 .../src/dbcommon/hash/tuple-batch-hasher.h         |   168 +
 depends/dbcommon/src/dbcommon/log/debug-logger.cc  |    83 +
 depends/dbcommon/src/dbcommon/log/debug-logger.h   |   120 +
 depends/dbcommon/src/dbcommon/log/error-code.h     |   444 +
 depends/dbcommon/src/dbcommon/log/exception.h      |    54 +
 depends/dbcommon/src/dbcommon/log/logger.cc        |    47 +
 depends/dbcommon/src/dbcommon/log/logger.h         |   163 +
 depends/dbcommon/src/dbcommon/log/stack-printer.cc |   663 +
 depends/dbcommon/src/dbcommon/log/stack-printer.h  |    35 +
 .../dbcommon/network/socket-tcp-message-client.cc  |   124 +
 .../dbcommon/network/socket-tcp-message-client.h   |    45 +
 .../dbcommon/network/socket-tcp-message-common.cc  |   307 +
 .../dbcommon/network/socket-tcp-message-common.h   |   197 +
 .../dbcommon/network/socket-tcp-message-server.cc  |   193 +
 .../dbcommon/network/socket-tcp-message-server.h   |    90 +
 .../network/socket-tcp-message-serverhandler.h     |    58 +
 .../dbcommon/src/dbcommon/network/socket-tcp.cc    |   103 +
 depends/dbcommon/src/dbcommon/network/socket-tcp.h |    35 +
 depends/dbcommon/src/dbcommon/nodes/datum.cc       |    65 +
 depends/dbcommon/src/dbcommon/nodes/datum.h        |   485 +
 depends/dbcommon/src/dbcommon/nodes/scalar.cc      |    46 +
 depends/dbcommon/src/dbcommon/nodes/scalar.h       |    88 +
 depends/dbcommon/src/dbcommon/nodes/select-list.cc |   175 +
 depends/dbcommon/src/dbcommon/nodes/select-list.h  |   170 +
 .../dbcommon/src/dbcommon/python/code_generator.py |   225 +
 .../src/dbcommon/testutil/agg-func-utils.h         |   275 +
 .../src/dbcommon/testutil/function-utils.cc        |   481 +
 .../src/dbcommon/testutil/function-utils.h         |   225 +
 .../dbcommon/src/dbcommon/testutil/scalar-utils.h  |    62 +
 .../src/dbcommon/testutil/tuple-batch-utils.h      |   302 +
 .../dbcommon/src/dbcommon/testutil/vector-utils.h  |   336 +
 .../src/dbcommon/thread/err-detect-callback.h      |   133 +
 .../dbcommon/src/dbcommon/thread/thread-base.cc    |    36 +
 depends/dbcommon/src/dbcommon/thread/thread-base.h |   172 +
 depends/dbcommon/src/dbcommon/type/CPPLINT.cfg     |     1 +
 depends/dbcommon/src/dbcommon/type/README          |     2 +
 depends/dbcommon/src/dbcommon/type/array.cc        |   209 +
 depends/dbcommon/src/dbcommon/type/array.h         |   141 +
 depends/dbcommon/src/dbcommon/type/bool.cc         |    62 +
 depends/dbcommon/src/dbcommon/type/bool.h          |    63 +
 depends/dbcommon/src/dbcommon/type/date.cc         |   420 +
 depends/dbcommon/src/dbcommon/type/date.h          |   926 +
 depends/dbcommon/src/dbcommon/type/decimal.cc      |   665 +
 depends/dbcommon/src/dbcommon/type/decimal.h       |   638 +
 depends/dbcommon/src/dbcommon/type/float.h         |   137 +
 depends/dbcommon/src/dbcommon/type/integer.h       |   143 +
 depends/dbcommon/src/dbcommon/type/interval.h      |   261 +
 depends/dbcommon/src/dbcommon/type/magma-tid.h     |   105 +
 depends/dbcommon/src/dbcommon/type/type-kind.h     |   121 +
 depends/dbcommon/src/dbcommon/type/type-modifier.h |    66 +
 depends/dbcommon/src/dbcommon/type/type-util.cc    |   227 +
 depends/dbcommon/src/dbcommon/type/type-util.h     |   137 +
 depends/dbcommon/src/dbcommon/type/typebase.cc     |   178 +
 depends/dbcommon/src/dbcommon/type/typebase.h      |   280 +
 depends/dbcommon/src/dbcommon/type/varlen.cc       |   182 +
 depends/dbcommon/src/dbcommon/type/varlen.h        |   150 +
 depends/dbcommon/src/dbcommon/utils/async-queue.h  |   340 +
 .../src/dbcommon/utils/block-memory-buffer.h       |   181 +
 depends/dbcommon/src/dbcommon/utils/bool-buffer.cc |   351 +
 depends/dbcommon/src/dbcommon/utils/bool-buffer.h  |   252 +
 depends/dbcommon/src/dbcommon/utils/byte-buffer.h  |   267 +
 .../dbcommon/src/dbcommon/utils/comp/compressor.h  |    64 +
 .../src/dbcommon/utils/comp/lz4-compressor.cc      |    94 +
 .../src/dbcommon/utils/comp/lz4-compressor.h       |    51 +
 .../src/dbcommon/utils/comp/snappy-compressor.cc   |    28 +
 .../src/dbcommon/utils/comp/snappy-compressor.h    |    90 +
 .../src/dbcommon/utils/comp/zlib-compressor.cc     |    81 +
 .../src/dbcommon/utils/comp/zlib-compressor.h      |    57 +
 depends/dbcommon/src/dbcommon/utils/cutils.cc      |    71 +
 depends/dbcommon/src/dbcommon/utils/cutils.h       |    63 +
 depends/dbcommon/src/dbcommon/utils/file-info.h    |    41 +
 .../src/dbcommon/utils/flat-memory-buffer.h        |   274 +
 depends/dbcommon/src/dbcommon/utils/global.cc      |    23 +
 depends/dbcommon/src/dbcommon/utils/global.h       |    29 +
 depends/dbcommon/src/dbcommon/utils/instrument.cc  |    80 +
 depends/dbcommon/src/dbcommon/utils/instrument.h   |    66 +
 depends/dbcommon/src/dbcommon/utils/int-util.h     |   357 +
 .../src/dbcommon/utils/join-tuple-buffer.cc        |   488 +
 .../src/dbcommon/utils/join-tuple-buffer.h         |   130 +
 depends/dbcommon/src/dbcommon/utils/lock.cc        |   186 +
 depends/dbcommon/src/dbcommon/utils/lock.h         |   121 +
 depends/dbcommon/src/dbcommon/utils/macro.h        |    44 +
 .../dbcommon/src/dbcommon/utils/mb/mb-converter.cc |   246 +
 .../dbcommon/src/dbcommon/utils/mb/mb-converter.h  |   116 +
 depends/dbcommon/src/dbcommon/utils/memory-pool.cc |    28 +
 depends/dbcommon/src/dbcommon/utils/memory-pool.h  |   206 +
 depends/dbcommon/src/dbcommon/utils/net-client.h   |    46 +
 .../dbcommon/src/dbcommon/utils/object-counter.cc  |    35 +
 .../dbcommon/src/dbcommon/utils/object-counter.h   |    54 +
 depends/dbcommon/src/dbcommon/utils/parameters.h   |   130 +
 depends/dbcommon/src/dbcommon/utils/string-util.cc |   231 +
 depends/dbcommon/src/dbcommon/utils/string-util.h  |   277 +
 depends/dbcommon/src/dbcommon/utils/sys-info.h     |    51 +
 depends/dbcommon/src/dbcommon/utils/time-util.h    |    62 +
 .../dbcommon/src/dbcommon/utils/timezone-util.cc   |   458 +
 .../dbcommon/src/dbcommon/utils/timezone-util.h    |   527 +
 depends/dbcommon/src/dbcommon/utils/url.cc         |   125 +
 depends/dbcommon/src/dbcommon/utils/url.h          |    89 +
 depends/dbcommon/test/CMakeLists.txt               |    37 +
 .../{libyarn => dbcommon}/test/data/checksum1.in   |     0
 .../{libyarn => dbcommon}/test/data/checksum2.in   |     0
 .../dbcommon/test/parallel/parallel-launcher.py    |   153 +
 depends/dbcommon/test/unit/CMakeLists.txt          |    30 +
 .../dbcommon/test/unit/checksum/test-checksum.cc   |   134 +
 .../dbcommon/test/unit/common/test-async-queue.cc  |    82 +
 depends/dbcommon/test/unit/common/test-function.cc |    54 +
 .../unit/common/test-tuple-batch-copy-control.cc   |    69 +
 .../test/unit/common/test-tuple-batch-store.cc     |    55 +
 .../dbcommon/test/unit/common/test-tuple-batch.cc  |   684 +
 .../test/unit/common/test-vector-copy-control.cc   |   624 +
 depends/dbcommon/test/unit/common/test-vector.cc   |   731 +
 .../test/unit/filesystem/test-file-system.cc       |    93 +
 depends/dbcommon/test/unit/function/CPPLINT.cfg    |     1 +
 .../unit/function/test-agg-func-has-no-group-by.cc |   882 +
 .../unit/function/test-agg-func-small-scale.cc     |   710 +
 .../dbcommon/test/unit/function/test-agg-func.cc   |   755 +
 .../dbcommon/test/unit/function/test-array-func.cc |   377 +
 .../test/unit/function/test-binary-cmp-function.cc |   429 +
 .../test/unit/function/test-binary-function.cc     |    41 +
 .../unit/function/test-codegen-arith-functions.cc  |   617 +
 .../unit/function/test-codegen-cmp-functions.cc    |   413 +
 .../function/test-codegen-typecast-functions.cc    |   261 +
 .../test/unit/function/test-date-function.cc       |   449 +
 .../test/unit/function/test-decimal-function.cc    |   420 +
 .../unit/function/test-mathematical-function.cc    |  1039 +
 .../test/unit/function/test-string-cmp-function.cc |   427 +
 .../test/unit/function/test-string-function.cc     |  1735 +
 .../test/unit/function/test-timestamp-function.cc  |   543 +
 .../test/unit/function/test-typecast-function.cc   |   352 +
 .../unit/function/test-typecast-texttonum-func.cc  |   211 +
 .../dbcommon/test/unit/log/test-debug-logger.cc    |    41 +
 .../unit/network/test-socket-tcp-message-comm.cc   |   105 +
 depends/dbcommon/test/unit/nodes/test-datum.cc     |   195 +
 .../dbcommon/test/unit/nodes/test-select-list.cc   |   168 +
 depends/dbcommon/test/unit/test-hash-table.cc      |   231 +
 .../test/unit/thread/test-err-detect-callback.cc   |   120 +
 depends/dbcommon/test/unit/type/test-type.cc       |   454 +
 depends/dbcommon/test/unit/unit-test-main.cc       |    34 +
 .../dbcommon/test/unit/utils/test-bool-buffer.cc   |   272 +
 .../dbcommon/test/unit/utils/test-byte-buffer.cc   |    89 +
 depends/dbcommon/test/unit/utils/test-cutils.cc    |    50 +
 .../test/unit/utils/test-flat-memory-buffer.cc     |    59 +
 depends/dbcommon/test/unit/utils/test-int-util.cc  |    88 +
 .../test/unit/utils/test-join-tuple-buffer.cc      |   236 +
 depends/dbcommon/test/unit/utils/test-lock.cc      |   180 +
 .../dbcommon/test/unit/utils/test-lz4-compress.cc  |    57 +
 .../dbcommon/test/unit/utils/test-mb-converter.cc  |    49 +
 .../dbcommon/test/unit/utils/test-string-util.cc   |    41 +
 depends/dbcommon/test/unit/utils/test-url.cc       |   112 +
 depends/libhdfs3/CMake/FindGoogleTest.cmake        |    12 +-
 depends/libhdfs3/CMakeLists.txt                    |     4 +-
 depends/libhdfs3/src/CMakeLists.txt                |     6 -
 depends/libhdfs3/src/client/FileEncryptionInfo.h   |     2 +-
 depends/libhdfs3/src/client/Hdfs.cpp               |    29 +-
 depends/libhdfs3/src/client/InputStreamImpl.cpp    |    41 +-
 depends/libhdfs3/src/client/InputStreamImpl.h      |    26 -
 depends/libhdfs3/src/client/OutputStreamImpl.cpp   |    63 +-
 depends/libhdfs3/src/client/OutputStreamImpl.h     |    26 -
 depends/libhdfs3/src/client/Permission.cpp         |     5 +-
 depends/libhdfs3/src/client/UserInfo.h             |     4 -
 depends/libhdfs3/src/client/hdfs.h                 |    34 +-
 depends/libhdfs3/src/common/SessionConfig.cpp      |    14 +-
 depends/libhdfs3/src/common/SessionConfig.h        |    34 -
 depends/libhdfs3/src/rpc/RpcChannel.cpp            |    13 +-
 depends/libhdfs3/src/rpc/RpcConfig.h               |    13 +-
 depends/libhdfs3/test/data/function-test.xml       |    15 -
 depends/libhdfs3/test/function/CMakeLists.txt      |     4 -
 depends/libhdfs3/test/function/TestCInterface.cpp  |   776 +-
 .../libhdfs3/test/function/TestOutputStream.cpp    |     2 +-
 depends/libhdfs3/test/unit/CMakeLists.txt          |     4 -
 .../libhdfs3/test/unit/UnitTestOutputStream.cpp    |    65 +-
 depends/libyarn/CMake/FindGoogleTest.cmake         |    12 +-
 depends/libyarn/CMakeLists.txt                     |     2 +
 .../src/libyarnclient/ApplicationClient.cpp        |     4 +-
 .../src/libyarnclient/ApplicationMaster.cpp        |     4 +-
 depends/storage/.gitignore                         |     9 +
 .../storage/CMake/CMakeTestCompileInt64tType.cc    |    30 +
 depends/storage/CMake/FindCogapp.cmake             |    50 +
 depends/storage/CMake/FindGFlags.cmake             |    48 +
 depends/storage/CMake/FindGlog.cmake               |    49 +
 depends/storage/CMake/FindJSON.cmake               |    38 +
 depends/storage/CMake/FindSnappy.cmake             |    30 +
 depends/storage/CMake/FindZLIB.cmake               |    48 +
 depends/{libyarn => storage}/CMake/Functions.cmake |     0
 depends/storage/CMake/Options.cmake                |    59 +
 depends/storage/CMake/Platform.cmake               |    47 +
 depends/storage/CMakeLists.txt                     |    31 +
 depends/storage/Makefile                           |    79 +
 depends/storage/Makefile.global.in                 |    40 +
 depends/storage/README                             |    15 +
 depends/storage/bootstrap                          |   141 +
 depends/storage/src/CMakeLists.txt                 |    85 +
 .../dummy.py => depends/storage/src/storage/README |     0
 depends/storage/src/storage/common/bloom-filter.h  |   189 +
 depends/storage/src/storage/common/string.h        |    95 +
 .../src/storage/cwrapper/hdfs-file-system-c.cc     |   486 +
 .../src/storage/cwrapper/hdfs-file-system-c.h      |   126 +
 .../storage/src/storage/cwrapper/orc-format-c.cc   |   642 +
 .../storage/src/storage/cwrapper/orc-format-c.h    |    81 +
 depends/storage/src/storage/format/format.cc       |    94 +
 depends/storage/src/storage/format/format.h        |   228 +
 depends/storage/src/storage/format/orc/README      |   320 +
 depends/storage/src/storage/format/orc/byte-rle.cc |   476 +
 depends/storage/src/storage/format/orc/byte-rle.h  |   237 +
 .../src/storage/format/orc/column-printer.cc       |   613 +
 .../src/storage/format/orc/column-printer.h        |   280 +
 .../storage/src/storage/format/orc/data-buffer.cc  |    67 +
 .../storage/src/storage/format/orc/data-buffer.h   |    62 +
 .../storage/src/storage/format/orc/exceptions.cc   |    58 +
 .../storage/src/storage/format/orc/exceptions.h    |    51 +
 .../storage/src/storage/format/orc/file-version.h  |    56 +
 .../storage/src/storage/format/orc/input-stream.cc |    40 +
 .../storage/src/storage/format/orc/input-stream.h  |   112 +
 depends/storage/src/storage/format/orc/int128.cc   |   480 +
 depends/storage/src/storage/format/orc/int128.h    |   304 +
 .../src/storage/format/orc/lzo-decompressor.cc     |   396 +
 .../src/storage/format/orc/lzo-decompressor.h      |    35 +
 .../src/storage/format/orc/orc-format-reader.cc    |   434 +
 .../src/storage/format/orc/orc-format-reader.h     |    79 +
 .../src/storage/format/orc/orc-format-writer.cc    |   224 +
 .../src/storage/format/orc/orc-format-writer.h     |    64 +
 .../storage/src/storage/format/orc/orc-format.cc   |   129 +
 .../storage/src/storage/format/orc/orc-format.h    |   113 +
 .../src/storage/format/orc/orc-predicates.cc       |   281 +
 .../src/storage/format/orc/orc-predicates.h        |    71 +
 .../src/storage/format/orc/orc-proto-definition.cc |   221 +
 .../src/storage/format/orc/orc-proto-definition.h  |  1131 +
 .../storage/src/storage/format/orc/orc_proto.proto |   277 +
 .../src/storage/format/orc/output-stream.cc        |    33 +
 .../storage/src/storage/format/orc/output-stream.h |   135 +
 depends/storage/src/storage/format/orc/reader.cc   |  2419 ++
 depends/storage/src/storage/format/orc/reader.h    |  1071 +
 depends/storage/src/storage/format/orc/rle-v0.h    |   137 +
 depends/storage/src/storage/format/orc/rle-v1.h    |   371 +
 depends/storage/src/storage/format/orc/rle-v2.h    |  1768 ++
 depends/storage/src/storage/format/orc/rle.cc      |   139 +
 depends/storage/src/storage/format/orc/rle.h       |   596 +
 .../storage/format/orc/seekable-input-stream.cc    |   624 +
 .../src/storage/format/orc/seekable-input-stream.h |   378 +
 .../storage/format/orc/seekable-output-stream.cc   |    46 +
 .../storage/format/orc/seekable-output-stream.h    |   261 +
 .../src/storage/format/orc/string-dictionary.cc    |    60 +
 .../src/storage/format/orc/string-dictionary.h     |    62 +
 depends/storage/src/storage/format/orc/timezone.cc |   458 +
 depends/storage/src/storage/format/orc/timezone.h  |   502 +
 .../storage/src/storage/format/orc/type-impl.cc    |   507 +
 depends/storage/src/storage/format/orc/type-impl.h |   109 +
 depends/storage/src/storage/format/orc/type.h      |   105 +
 depends/storage/src/storage/format/orc/vector.cc   |   453 +
 depends/storage/src/storage/format/orc/vector.h    |   703 +
 depends/storage/src/storage/format/orc/writer.cc   |   288 +
 depends/storage/src/storage/format/orc/writer.h    |  1516 +
 .../format/orc/writer/binary-column-writer.cc      |   101 +
 .../format/orc/writer/decimal-column-writer.cc     |   307 +
 .../format/orc/writer/string-column-writer.cc      |   232 +
 depends/storage/src/storage/testutil/file-utils.h  |    54 +
 depends/storage/src/storage/testutil/format-util.h |   288 +
 depends/storage/test/CMakeLists.txt                |    35 +
 depends/storage/test/data/hawq-write-orc.sql       |     3 +
 .../storage/test/data/sampledata                   |     0
 depends/storage/test/data/spark-read-orc.sql       |     3 +
 depends/storage/test/parallel/parallel-launcher.py |   153 +
 depends/storage/test/unit/CMakeLists.txt           |    22 +
 .../storage/test/unit/common/test-bloom-filter.cc  |   118 +
 .../test/unit/format/test-filter-pushdown.cc       |   613 +
 .../test/unit/format/test-orc-byte-rle-encoder.cc  |   165 +
 .../storage/test/unit/format/test-orc-byte-rle.cc  |  1445 +
 .../storage/test/unit/format/test-orc-format.cc    |   529 +
 .../storage/test/unit/format/test-orc-int128.cc    |   620 +
 .../test/unit/format/test-orc-proto-definition.cc  |   351 +
 depends/storage/test/unit/format/test-orc-rle.cc   |  2863 ++
 .../storage/test/unit/format/test-orc-vector.cc    |   192 +
 .../test/unit/format/test-string-dictionary.cc     |    74 +
 depends/storage/test/unit/unit-test-main.cc        |    32 +
 depends/univplan/.gitignore                        |     9 +
 .../univplan/CMake/CMakeTestCompileInt64tType.cc   |    30 +
 depends/univplan/CMake/FindCogapp.cmake            |    50 +
 depends/univplan/CMake/FindGFlags.cmake            |    48 +
 depends/univplan/CMake/FindGlog.cmake              |    49 +
 depends/univplan/CMake/FindJSON.cmake              |    38 +
 depends/univplan/CMake/FindSnappy.cmake            |    30 +
 .../{libyarn => univplan}/CMake/Functions.cmake    |     0
 depends/univplan/CMake/Options.cmake               |    59 +
 depends/univplan/CMake/Platform.cmake              |    47 +
 depends/univplan/CMakeLists.txt                    |    31 +
 depends/univplan/Makefile                          |    79 +
 depends/univplan/Makefile.global.in                |    40 +
 .../test/dummy.py => depends/univplan/README       |     0
 depends/univplan/bootstrap                         |   141 +
 depends/univplan/src/CMakeLists.txt                |    77 +
 depends/univplan/src/univplan/common/expression.cc |   744 +
 depends/univplan/src/univplan/common/expression.h  |   352 +
 .../univplan/src/univplan/common/plannode-util.h   |   336 +
 .../univplan/src/univplan/common/plannode-walker.h |   138 +
 depends/univplan/src/univplan/common/stagize.cc    |   212 +
 depends/univplan/src/univplan/common/stagize.h     |    54 +
 depends/univplan/src/univplan/common/statistics.h  |    71 +
 .../univplan/src/univplan/common/subplan-util.cc   |   151 +
 .../univplan/src/univplan/common/subplan-util.h    |    46 +
 .../univplan/src/univplan/common/univplan-type.h   |   148 +
 depends/univplan/src/univplan/common/var-util.cc   |   111 +
 depends/univplan/src/univplan/common/var-util.h    |    40 +
 .../univplan/src/univplan/cwrapper/univplan-c.cc   |  1189 +
 .../univplan/src/univplan/cwrapper/univplan-c.h    |   364 +
 .../src/univplan/minmax/minmax-predicates.cc       |   596 +
 .../src/univplan/minmax/minmax-predicates.h        |   604 +
 .../univplan/proto/universal-plan-catalog.proto    |    64 +
 .../src/univplan/proto/universal-plan-expr.proto   |   216 +
 .../src/univplan/proto/universal-plan.proto        |   430 +
 .../src/univplan/testutil/univplan-proto-util.cc   |   520 +
 .../src/univplan/testutil/univplan-proto-util.h    |   276 +
 .../univplanbuilder/univplanbuilder-agg.cc         |    60 +
 .../univplan/univplanbuilder/univplanbuilder-agg.h |    52 +
 .../univplanbuilder/univplanbuilder-append.cc      |    53 +
 .../univplanbuilder/univplanbuilder-append.h       |    48 +
 .../univplanbuilder/univplanbuilder-column.h       |    54 +
 .../univplanbuilder/univplanbuilder-connector.h    |    98 +
 .../univplanbuilder/univplanbuilder-expr-node.h    |   736 +
 .../univplanbuilder/univplanbuilder-expr-poly.h    |    54 +
 .../univplanbuilder/univplanbuilder-expr-tree.h    |   105 +
 .../univplanbuilder-ext-gs-filter.cc               |    55 +
 .../univplanbuilder-ext-gs-filter.h                |    51 +
 .../univplanbuilder/univplanbuilder-ext-gs-proj.cc |    57 +
 .../univplanbuilder/univplanbuilder-ext-gs-proj.h  |    51 +
 .../univplanbuilder/univplanbuilder-ext-gs-scan.cc |   103 +
 .../univplanbuilder/univplanbuilder-ext-gs-scan.h  |    62 +
 .../univplanbuilder/univplanbuilder-hash.h         |    64 +
 .../univplanbuilder/univplanbuilder-hashjoin.h     |    79 +
 .../univplanbuilder/univplanbuilder-insert.cc      |    69 +
 .../univplanbuilder/univplanbuilder-insert.h       |    52 +
 .../univplanbuilder/univplanbuilder-limit.cc       |    57 +
 .../univplanbuilder/univplanbuilder-limit.h        |    52 +
 .../univplanbuilder/univplanbuilder-listener.h     |    48 +
 .../univplanbuilder/univplanbuilder-material.h     |    94 +
 .../univplanbuilder/univplanbuilder-mergejoin.h    |    74 +
 .../univplanbuilder/univplanbuilder-nestloop.h     |    68 +
 .../univplanbuilder/univplanbuilder-node.cc        |    73 +
 .../univplanbuilder/univplanbuilder-node.h         |    71 +
 .../univplanbuilder/univplanbuilder-paraminfo.h    |    59 +
 .../univplanbuilder-plan-node-poly.h               |    60 +
 .../univplanbuilder/univplanbuilder-plan.cc        |   153 +
 .../univplanbuilder/univplanbuilder-plan.h         |    86 +
 .../univplanbuilder-range-tbl-entry.h              |    52 +
 .../univplanbuilder/univplanbuilder-receiver.h     |    51 +
 .../univplanbuilder/univplanbuilder-result.h       |    67 +
 .../univplanbuilder/univplanbuilder-scan-seq.cc    |    70 +
 .../univplanbuilder/univplanbuilder-scan-seq.h     |    56 +
 .../univplanbuilder/univplanbuilder-scan-task.h    |    94 +
 .../univplanbuilder-shareinput-scan.h              |    70 +
 .../univplanbuilder/univplanbuilder-sink.h         |   252 +
 .../univplanbuilder/univplanbuilder-sort.cc        |    65 +
 .../univplanbuilder/univplanbuilder-sort.h         |    54 +
 .../univplanbuilder-subquery-scan.h                |    67 +
 .../univplanbuilder/univplanbuilder-table.h        |    71 +
 .../univplanbuilder/univplanbuilder-target-entry.h |    67 +
 .../univplanbuilder/univplanbuilder-unique.cc      |    57 +
 .../univplanbuilder/univplanbuilder-unique.h       |    51 +
 .../univplan/univplanbuilder/univplanbuilder.cc    |    92 +
 .../src/univplan/univplanbuilder/univplanbuilder.h |    68 +
 depends/univplan/test/CMakeLists.txt               |    36 +
 depends/univplan/test/data/TestAgg                 |   262 +
 depends/univplan/test/data/TestCompletedPlanAfter  |   851 +
 depends/univplan/test/data/TestCompletedPlanBefore |   630 +
 depends/univplan/test/data/TestLimitCount          |   228 +
 depends/univplan/test/data/TestLimitCountOffset    |   246 +
 depends/univplan/test/data/TestLimitOffset         |   212 +
 depends/univplan/test/data/TestNullTest            |   136 +
 depends/univplan/test/data/TestQualListAndExpr     |   259 +
 depends/univplan/test/data/TestSort                |    99 +
 depends/univplan/test/data/TestStagizeAfter        |   167 +
 depends/univplan/test/data/TestStagizeBefore       |   100 +
 .../univplan/test/data/TestUnivPlanProtoGenerate   |   130 +
 .../univplan/test/parallel/parallel-launcher.py    |   153 +
 depends/univplan/test/unit/CMakeLists.txt          |    15 +
 depends/univplan/test/unit/test-basic-univplan.cc  |   262 +
 .../univplan/test/unit/test-minmax-cotasklist.cc   |   259 +
 .../univplan/test/unit/test-univplan-cwrapper.cc   |    90 +
 depends/univplan/test/unit/test-univplan.h         |    67 +
 depends/univplan/test/unit/unit-test-main.cc       |    32 +
 dist/hawq/LICENSE                                  |     4 +-
 dist/hawq/NOTICE                                   |     2 +-
 doc/src/sgml/ref/alter_database.sgml               |    50 -
 doc/src/sgml/ref/alter_role.sgml                   |     2 +-
 doc/src/sgml/ref/alter_schema.sgml                 |    15 -
 doc/src/sgml/ref/alter_sequence.sgml               |    16 -
 doc/src/sgml/ref/alter_type.sgml                   |    27 +-
 doc/src/sgml/ref/create_external_table.sgml        |    12 +-
 doc/src/sgml/ref/create_role.sgml                  |     2 +-
 doc/src/sgml/ref/create_table.sgml                 |   103 +-
 doc/src/sgml/ref/fetch.sgml                        |    53 +-
 doc/src/sgml/ref/grant.sgml                        |    32 +-
 doc/src/sgml/ref/revoke.sgml                       |    18 +-
 getversion                                         |    10 +-
 pom.xml                                            |     3 +-
 pre-push                                           |    47 +
 pxf/Makefile                                       |    25 +-
 pxf/README.md                                      |     9 -
 pxf/build.gradle                                   |   121 +-
 pxf/gradle.properties                              |     7 +-
 .../java/org/apache/hawq/pxf/api/OneField.java     |     5 -
 .../main/java/org/apache/hawq/pxf/api/OneRow.java  |     9 -
 .../apache/hawq/pxf/api/utilities/InputData.java   |    69 +-
 .../hawq/pxf/api/utilities/ProfilesConf.java       |     2 +-
 .../apache/hawq/pxf/api/utilities/Utilities.java   |    99 -
 .../pxf/api/utilities/ColumnDescriptorTest.java    |     3 +-
 .../hawq/pxf/api/utilities/ProfilesConfTest.java   |    12 +-
 .../hawq/pxf/api/utilities/UtilitiesTest.java      |   138 -
 .../pxf/plugins/hdfs/HdfsAtomicDataAccessor.java   |     2 +-
 .../plugins/hdfs/HdfsSplittableDataAccessor.java   |     2 +-
 .../hawq/pxf/plugins/hdfs/ParquetResolver.java     |     2 +-
 .../pxf/plugins/hdfs/utilities/HdfsUtilities.java  |    69 +-
 .../plugins/hdfs/utilities/HdfsUtilitiesTest.java  |    21 -
 .../hawq/pxf/plugins/hive/HiveDataFragmenter.java  |    58 +-
 .../plugins/hive/HiveInputFormatFragmenter.java    |     2 +-
 .../pxf/plugins/hive/HiveLineBreakAccessor.java    |     2 +-
 .../hawq/pxf/plugins/hive/HiveMetadataFetcher.java |     2 +-
 .../hawq/pxf/plugins/hive/HiveORCAccessor.java     |    77 +-
 .../pxf/plugins/hive/utilities/HiveUtilities.java  |    27 +-
 .../pxf/plugins/hive/utilities/ProfileFactory.java |    19 +-
 .../pxf/plugins/hive/HiveDataFragmenterTest.java   |   218 -
 .../hawq/pxf/plugins/hive/HiveORCAccessorTest.java |    15 -
 .../plugins/hive/utilities/HiveUtilitiesTest.java  |     3 +-
 pxf/pxf-jdbc/README.md                             |     2 +
 .../apache/hawq/pxf/plugins/jdbc/JdbcAccessor.java |   353 +
 .../pxf/plugins/jdbc/JdbcPartitionFragmenter.java  |    44 +-
 .../apache/hawq/pxf/plugins/jdbc/JdbcPlugin.java   |     4 +-
 .../apache/hawq/pxf/plugins/jdbc/JdbcResolver.java |   367 +
 .../pxf/plugins/jdbc/utils/MicrosoftProduct.java   |    35 +
 .../jdbc/writercallable/BatchWriterCallable.java   |   109 +
 .../jdbc/writercallable/SimpleWriterCallable.java  |   102 +
 .../jdbc/writercallable/WriterCallable.java        |    56 +
 .../jdbc/writercallable/WriterCallableFactory.java |    97 +
 .../plugins/jdbc/JdbcPartitionFragmenterTest.java  |   173 +-
 .../apache/hawq/pxf/plugins/json/JsonAccessor.java |     2 +-
 .../org/apache/hawq/pxf/plugins/json/PxfUnit.java  |    16 +-
 .../parser/PartitionedJsonParserNoSeekTest.java    |    11 +-
 pxf/pxf-service/src/configs/tomcat/bin/setenv.sh   |    15 +-
 .../hawq/pxf/service/BridgeOutputBuilder.java      |    13 -
 .../pxf/service/FragmentsResponseFormatter.java    |     4 +-
 .../org/apache/hawq/pxf/service/ReadBridge.java    |     2 +-
 .../java/org/apache/hawq/pxf/service/UGICache.java |     5 +-
 .../hawq/pxf/service/rest/BridgeResource.java      |    45 +-
 .../hawq/pxf/service/rest/MetadataResource.java    |     2 +-
 .../pxf/service/rest/ServletLifecycleListener.java |    63 +
 .../hawq/pxf/service/rest/WritableResource.java    |    32 +-
 .../pxf/service/servlet/SecurityServletFilter.java |    24 +-
 .../pxf/service/utilities/CustomWebappLoader.java  |     2 +-
 .../hawq/pxf/service/utilities/ProtocolData.java   |    85 +-
 .../hawq/pxf/service/utilities/SecureLogin.java    |    21 +-
 .../src/main/resources/pxf-log4j.properties        |     6 +-
 .../src/main/resources/pxf-private.classpath       |    35 +-
 .../src/main/resources/pxf-privatebigtop.classpath |     6 +-
 .../src/main/resources/pxf-privatehdp.classpath    |     7 +-
 .../src/main/resources/pxf-privateoushu.classpath  |    52 +
 .../src/main/resources/pxf-privatephd.classpath    |     8 +-
 .../src/main/resources/pxf-profiles-default.xml    |    55 +-
 pxf/pxf-service/src/main/webapp/WEB-INF/web.xml    |    10 +-
 pxf/pxf-service/src/scripts/pxf-env.sh             |    37 +-
 pxf/pxf-service/src/scripts/pxf-service            |   351 +-
 .../hawq/pxf/service/BridgeOutputBuilderTest.java  |    15 +-
 .../org/apache/hawq/pxf/service/UGICacheTest.java  |     2 +-
 .../service/servlet/SecurityServletFilterTest.java |     3 +
 .../pxf/service/utilities/ProtocolDataTest.java    |   101 +-
 pxf/settings.gradle                                |     3 +-
 pxf/tomcat/src/scripts/pre-install.sh              |     2 +-
 ranger-plugin/admin-plugin/pom.xml                 |     2 +-
 ranger-plugin/conf/rps.properties                  |     5 +-
 ranger-plugin/integration/admin/pom.xml            |     2 +-
 ranger-plugin/integration/pom.xml                  |     2 +-
 ranger-plugin/integration/service/pom.xml          |     2 +-
 ranger-plugin/pom.xml                              |     2 +-
 ranger-plugin/service/pom.xml                      |     2 +-
 sanity-test.sh                                     |   219 +
 src/Makefile                                       |    41 +-
 src/Makefile.global.in                             |    41 +-
 src/Makefile.mock                                  |     6 +-
 src/backend/Makefile                               |    33 +-
 src/backend/access/Makefile                        |     2 +-
 src/backend/access/appendonly/aosegfiles.c         |    16 +-
 src/backend/access/appendonly/appendonlyam.c       |     9 +-
 src/backend/access/appendonly/appendonlywriter.c   |   253 +-
 src/backend/access/bitmap/bitmapattutil.c          |     3 +-
 src/backend/access/common/printtup.c               |    66 +-
 src/backend/access/common/reloptions.c             |   126 +-
 src/backend/access/common/tupdesc.c                |    57 +-
 src/backend/access/external/Makefile               |     4 +-
 src/backend/access/external/fileam.c               |   434 +-
 src/backend/access/external/plugstorage.c          |   399 +-
 src/backend/access/external/pxffilters.c           |   110 +-
 src/backend/access/external/pxfheaders.c           |     8 +-
 src/backend/access/external/pxfmasterapi.c         |     6 +-
 src/backend/access/external/pxfuriparser.c         |     6 +-
 src/backend/access/external/pxfutils.c             |     2 -
 src/backend/access/external/read_cache.c           |   240 +
 src/backend/access/external/test/pxffilters_test.c |   114 +-
 src/backend/access/external/test/pxfheaders_test.c |    35 -
 .../access/external/test/pxfuriparser_test.c       |     4 +-
 src/backend/access/external/url.c                  |     2 +-
 src/backend/access/external/url_curl.c             |    12 +
 src/backend/access/heap/heapam.c                   |    14 +-
 src/backend/access/index/catquery.c                |     1 -
 src/backend/access/index/gperf.init                |     1 -
 src/backend/access/orc/Makefile                    |    13 +
 src/backend/access/orc/orcam.c                     |   859 +
 src/backend/access/orc/orcsegfiles.c               |   382 +
 src/backend/access/parquet/parquetam.c             |     4 +-
 src/backend/access/parquet/parquetsegfiles.c       |     1 +
 src/backend/access/transam/xact.c                  |   243 +
 src/backend/bootstrap/bootparse.y                  |     3 +-
 src/backend/catalog/.gitignore                     |     2 +-
 src/backend/catalog/Makefile                       |    14 +
 src/backend/catalog/aclchk.c                       |    12 +-
 src/backend/catalog/aoseg.c                        |    24 +-
 src/backend/catalog/catalog.c                      |    14 +-
 src/backend/catalog/cdb_external_extensions.sql    |    46 +
 src/backend/catalog/dependency.c                   |     4 +-
 src/backend/catalog/external/externalmd.c          |     4 +-
 src/backend/catalog/gp_toolkit.sql.in              |    49 +-
 src/backend/catalog/gp_toolkit_test.sql.in         |     1 +
 src/backend/catalog/heap.c                         |   301 +-
 src/backend/catalog/index.c                        |   279 +-
 src/backend/catalog/information_schema.sql         |    27 +
 src/backend/catalog/namespace.c                    |   211 +-
 src/backend/catalog/pg_aggregate.c                 |     5 +-
 src/backend/catalog/pg_attribute_encoding.c        |     2 +-
 src/backend/catalog/pg_compression.c               |     4 +-
 src/backend/catalog/pg_constraint.c                |    73 +
 src/backend/catalog/pg_extprotocol.c               |     5 +-
 src/backend/catalog/pg_exttable.c                  |   251 +-
 src/backend/catalog/pg_namespace.c                 |    98 +
 src/backend/catalog/system_views.sql               |    11 +
 src/backend/catalog/toasting.c                     |     5 +-
 src/backend/cdb/Makefile                           |     9 +-
 src/backend/cdb/cdbcat.c                           |    27 +-
 src/backend/cdb/cdbconn.c                          |   131 +
 src/backend/cdb/cdbcopy.c                          |    56 +-
 src/backend/cdb/cdbdatabaseinfo.c                  |   263 +-
 src/backend/cdb/cdbdatalocality.c                  |  3031 +-
 src/backend/cdb/cdbdirectopen.c                    |    33 +
 src/backend/cdb/cdbdispatchedtablespaceinfo.c      |     3 +-
 src/backend/cdb/cdbdispatchresult.c                |   222 +-
 src/backend/cdb/cdbexplain.c                       |   344 +-
 src/backend/cdb/cdbfilesplit.c                     |    65 +-
 src/backend/cdb/cdbfilesystemcredential.c          |    31 +-
 src/backend/cdb/cdbgang.c                          |     1 +
 src/backend/cdb/cdbgroup.c                         |    20 +-
 src/backend/cdb/cdbhash.c                          |    76 +
 src/backend/cdb/cdbllize.c                         |    11 +-
 src/backend/cdb/cdbmirroredappendonly.c            |    15 +
 src/backend/cdb/cdbmirroredfilesysobj.c            |    49 +-
 src/backend/cdb/cdbmutate.c                        |   174 +-
 src/backend/cdb/cdbpartition.c                     |    82 +-
 src/backend/cdb/cdbpath.c                          |     2 +
 src/backend/cdb/cdbpathlocus.c                     |     8 +-
 src/backend/cdb/cdbpersistentbuild.c               |    20 +-
 src/backend/cdb/cdbpersistentrecovery.c            |    24 +-
 src/backend/cdb/cdbpersistenttablespace.c          |     1 -
 src/backend/cdb/cdbplan.c                          |    34 +-
 src/backend/cdb/cdbquerycontextdispatching.c       |   576 +-
 src/backend/cdb/cdbsharedstorageop.c               |    26 +-
 src/backend/cdb/cdbsrlz.c                          |   103 +-
 src/backend/cdb/cdbtargeteddispatch.c              |    17 +-
 src/backend/cdb/cdbutil.c                          |    13 +
 src/backend/cdb/cdbvars.c                          |    11 +-
 src/backend/cdb/dispatcher.c                       |   178 +-
 src/backend/cdb/dispatcher_mgr.c                   |   306 +
 src/backend/cdb/dispatcher_mgt.c                   |    91 +-
 src/backend/cdb/dispatcher_new.c                   |  1506 +
 src/backend/cdb/executormgr.c                      |   104 +-
 src/backend/cdb/executormgr_new.c                  |   688 +
 src/backend/cdb/motion/Makefile                    |     2 +-
 src/backend/cdb/motion/cdbmotion.c                 |     3 +-
 src/backend/cdb/motion/ic_common.c                 |     6 +-
 src/backend/cdb/motion/ic_new.c                    |    90 +
 src/backend/cdb/motion/ic_udp.c                    |    48 +-
 src/backend/cdb/poolmgr.c                          |    16 +
 src/backend/cdb/scheduler.c                        |   688 +
 src/backend/cdb/workermgr.c                        |    16 +
 src/backend/commands/alter.c                       |    14 +-
 src/backend/commands/analyze.c                     |  1107 +-
 src/backend/commands/cluster.c                     |    19 +-
 src/backend/commands/conversioncmds.c              |     6 +-
 src/backend/commands/copy.c                        |   702 +-
 src/backend/commands/dbcommands.c                  |   106 +-
 src/backend/commands/explain.c                     |   122 +-
 src/backend/commands/extprotocolcmds.c             |     3 +-
 src/backend/commands/filespace.c                   |    19 +-
 src/backend/commands/filesystemcmds.c              |     3 +-
 src/backend/commands/foreigncmds.c                 |    31 +-
 src/backend/commands/functioncmds.c                |    22 +
 src/backend/commands/indexcmds.c                   |   109 +-
 src/backend/commands/portalcmds.c                  |     3 +
 src/backend/commands/prepare.c                     |     3 +
 src/backend/commands/schemacmds.c                  |     7 +
 src/backend/commands/sequence.c                    |    22 +-
 src/backend/commands/tablecmds.c                   |  2298 +-
 src/backend/commands/tablespace.c                  |    39 +-
 src/backend/commands/trigger.c                     |    15 +-
 src/backend/commands/typecmds.c                    |    21 +-
 src/backend/commands/user.c                        |   670 +-
 src/backend/commands/vacuum.c                      |     9 +-
 src/backend/commands/vacuumlazy.c                  |    25 +-
 src/backend/commands/variable.c                    |    14 +-
 src/backend/commands/view.c                        |    21 +-
 src/backend/executor/Makefile                      |     3 +-
 src/backend/executor/execAmi.c                     |     6 +-
 src/backend/executor/execDML.c                     |   579 +-
 src/backend/executor/execHHashagg.c                |   145 +-
 src/backend/executor/execMain.c                    |   900 +-
 src/backend/executor/execProcnode.c                |    63 +-
 src/backend/executor/execQual.c                    |    22 +-
 src/backend/executor/execScan.c                    |    21 +-
 src/backend/executor/execTuples.c                  |    10 +-
 src/backend/executor/execUtils.c                   |   170 +-
 src/backend/executor/functions.c                   |    36 +-
 src/backend/executor/newExecutor.c                 |   475 +
 src/backend/executor/nodeAgg.c                     |    47 +-
 src/backend/executor/nodeDML.c                     |     6 +-
 src/backend/executor/nodeDynamicTableScan.c        |     2 +-
 src/backend/executor/nodeExternalscan.c            |    91 +-
 src/backend/executor/nodeMotion.c                  |    16 +-
 src/backend/executor/nodeResult.c                  |     8 +-
 src/backend/executor/nodeRowTrigger.c              |     3 +-
 src/backend/executor/nodeSubplan.c                 |   171 +-
 src/backend/executor/spi.c                         |   137 +-
 src/backend/gp_libpq_fe/fe-connect.c               |    35 +-
 src/backend/gp_libpq_fe/fe-exec.c                  |   230 +
 src/backend/gp_libpq_fe/fe-protocol3.c             |    27 +-
 src/backend/gp_libpq_fe/gp-libpq-fe.h              |    46 +
 src/backend/gp_libpq_fe/gp-libpq-int.h             |     4 +
 src/backend/gpopt/gpdbwrappers.cpp                 |     2 +-
 .../gpopt/translate/CTranslatorDXLToPlStmt.cpp     |    12 +-
 .../gpopt/translate/CTranslatorRelcacheToDXL.cpp   |     1 +
 src/backend/libpq/Makefile                         |     2 +-
 src/backend/libpq/auth.c                           |    79 +-
 src/backend/libpq/cloudrest.c                      |   452 +
 src/backend/libpq/hba.c                            |    15 +
 src/backend/libpq/pqcomm.c                         |    16 +-
 src/backend/nodes/copyfuncs.c                      |   129 +-
 src/backend/nodes/equalfuncs.c                     |    42 +-
 src/backend/nodes/outfast.c                        |   116 +-
 src/backend/nodes/outfuncs.c                       |   107 +-
 src/backend/nodes/print.c                          |     4 +
 src/backend/nodes/readfast.c                       |   139 +-
 src/backend/nodes/readfuncs.c                      |    49 +-
 src/backend/optimizer/path/allpaths.c              |   354 +-
 src/backend/optimizer/path/indxpath.c              |   145 +-
 src/backend/optimizer/plan/Makefile                |     5 +-
 src/backend/optimizer/plan/createplan.c            |   367 +-
 src/backend/optimizer/plan/initsplan.c             |    83 +-
 src/backend/optimizer/plan/newPlanner.c            |  2028 ++
 src/backend/optimizer/plan/planmain.c              |     4 +
 src/backend/optimizer/plan/planner.c               |   485 +-
 src/backend/optimizer/plan/planpartition.c         |     2 +
 src/backend/optimizer/plan/planshare.c             |    56 +-
 src/backend/optimizer/plan/planwindow.c            |     9 +-
 src/backend/optimizer/plan/setrefs.c               |    46 +-
 src/backend/optimizer/plan/subselect.c             |    10 +-
 src/backend/optimizer/prep/preptlist.c             |    19 +-
 src/backend/optimizer/prep/prepunion.c             |    27 +-
 src/backend/optimizer/util/clauses.c               |    70 +-
 src/backend/optimizer/util/pathnode.c              |    16 +-
 src/backend/optimizer/util/plancat.c               |    78 +
 src/backend/optimizer/util/relnode.c               |     2 +-
 src/backend/optimizer/util/var.c                   |    56 +
 src/backend/optimizer/util/walkers.c               |     8 +
 src/backend/parser/analyze.c                       | 21520 ++++++-------
 src/backend/parser/gram.y                          |   527 +-
 src/backend/parser/parse_clause.c                  |    40 +-
 src/backend/parser/parse_coerce.c                  |    13 +-
 src/backend/parser/parse_expr.c                    |    25 +-
 src/backend/parser/parse_func.c                    |    55 +-
 src/backend/parser/parse_relation.c                |    29 +-
 src/backend/parser/parse_utilcmd.c                 |     5 +
 src/backend/postmaster/Makefile                    |     2 +-
 src/backend/postmaster/identity.c                  |    40 +-
 .../postmaster/pg_stat_activity_history_process.c  |   945 +
 src/backend/postmaster/postmaster.c                |    81 +
 src/backend/postmaster/service.c                   |    11 +
 src/backend/postmaster/syslogger.c                 |     4 +-
 src/backend/resourcemanager/requesthandler.c       |    45 +-
 src/backend/resourcemanager/requesthandler_RMSEG.c |     8 +-
 src/backend/resourcemanager/resqueuemanager.c      |     8 +-
 src/backend/rewrite/rewriteDefine.c                |     4 +-
 src/backend/storage/buffer/bufmgr.c                |     8 +-
 src/backend/storage/file/fd.c                      |    55 +-
 src/backend/storage/ipc/ipci.c                     |     5 +
 src/backend/storage/lmgr/lock.c                    |     1 -
 src/backend/storage/lmgr/proc.c                    |     2 +
 src/backend/storage/lmgr/spin.c                    |     3 +-
 src/backend/storage/page/itemptr.c                 |    29 +
 src/backend/tcop/dest.c                            |    13 +-
 src/backend/tcop/postgres.c                        |   449 +-
 src/backend/tcop/pquery.c                          |    16 +
 src/backend/tcop/utility.c                         |   216 +-
 src/backend/utils/.gitignore                       |     1 +
 src/backend/utils/Gen_hawq_funcoid_mapping.sh      |   731 +
 src/backend/utils/Makefile                         |    10 +-
 src/backend/utils/adt/Makefile                     |     3 +-
 src/backend/utils/adt/array_distance_install.sql   |    15 +
 src/backend/utils/adt/array_distance_uninstall.sql |    15 +
 src/backend/utils/adt/array_userfuncs.c            |   274 +
 src/backend/utils/adt/arrayfuncs.c                 |    42 +
 src/backend/utils/adt/dbsize.c                     |    94 +-
 src/backend/utils/adt/int.c                        |   191 +-
 src/backend/utils/adt/int8.c                       |   148 +-
 src/backend/utils/adt/json.c                       |  2525 ++
 src/backend/utils/adt/jsonb.c                      |  1968 ++
 src/backend/utils/adt/jsonb_gin.c                  |   624 +
 src/backend/utils/adt/jsonb_op.c                   |   292 +
 src/backend/utils/adt/jsonb_util.c                 |  1802 ++
 src/backend/utils/adt/jsonfuncs.c                  |  3958 +++
 src/backend/utils/adt/numeric.c                    |    38 +
 src/backend/utils/adt/pxf_functions.c              |     4 +
 src/backend/utils/adt/regproc.c                    |     6 +-
 src/backend/utils/adt/ruleutils.c                  |    57 +-
 src/backend/utils/adt/selfuncs.c                   |   266 +-
 src/backend/utils/cache/lsyscache.c                |    53 +-
 src/backend/utils/cache/relcache.c                 |     3 +-
 src/backend/utils/cache/typcache.c                 |   380 +-
 src/backend/utils/error/elog.c                     |    10 +
 src/backend/utils/fmgr/fmgr.c                      |    62 +-
 src/backend/utils/gp/segadmin.c                    |   126 +
 src/backend/utils/hawq_type_mapping.c              |   216 +
 src/backend/utils/init/globals.c                   |    14 +
 src/backend/utils/init/miscinit.c                  |     8 +-
 src/backend/utils/mb/mbutils.c                     |    59 +
 src/backend/utils/misc/atomic.c                    |    12 +-
 src/backend/utils/misc/etc/gpcheck.cnf             |    18 +-
 src/backend/utils/misc/etc/hawq-site.xml           |    17 +
 src/backend/utils/misc/etc/hdfs-client.xml         |     9 -
 src/backend/utils/misc/etc/template-hawq-site.xml  |    11 +
 src/backend/utils/misc/guc.c                       |   581 +-
 src/backend/utils/misc/uriparser.c                 |    98 +-
 src/backend/utils/mmgr/memprot.c                   |     2 +-
 src/backend/utils/mmgr/portalmem.c                 |    23 +-
 src/bin/Makefile                                   |     2 +-
 src/bin/gpcheckhdfs/Makefile                       |     5 +-
 src/bin/gpcheckhdfs/gpcheckhdfs.c                  |    12 +-
 src/bin/gpfdist/Makefile                           |     9 +-
 src/bin/gpfilesystem/hdfs/Makefile                 |     2 +-
 src/bin/gpfusion/gpbridgeapi.c                     |     5 +-
 src/bin/pg_ctl/pg_ctl.c                            |     6 +-
 src/bin/pg_dump/dumputils.c                        |    44 +
 src/bin/pg_dump/pg_backup_archiver.c               |     3 -
 src/bin/pg_dump/pg_dump.c                          |   352 +-
 src/bin/pg_dump/pg_dumpall.c                       |     6 +-
 src/bin/psql/describe.c                            |   218 +-
 src/bin/psql/tab-complete.c                        |   339 +-
 src/include/Makefile                               |     3 +-
 src/include/access/appendonlywriter.h              |     4 +-
 src/include/access/extprotocol.h                   |    60 +-
 src/include/access/fileam.h                        |    42 +-
 src/include/access/filesplit.h                     |    10 +
 src/include/access/formatter.h                     |    21 +
 src/include/access/gin.h                           |    23 +
 src/include/access/heapam.h                        |     2 +-
 src/include/access/orcam.h                         |   108 +
 src/include/access/orcsegfiles.h                   |    62 +
 src/include/access/persistentfilesysobjname.h      |     1 +
 src/include/access/plugstorage.h                   |   132 +-
 src/include/access/plugstorage_utils.h             |    46 +-
 src/include/access/pxffilters.h                    |     2 +-
 src/include/access/read_cache.h                    |    29 +
 src/include/access/relscan.h                       |    18 +-
 src/include/access/tupdesc.h                       |     2 +
 src/include/access/xact.h                          |    53 +
 src/include/catalog/calico.pl                      |     3 +-
 src/include/catalog/caqltrack.pl                   |     2 +-
 src/include/catalog/caqluniqdef.pl                 |     2 +-
 src/include/catalog/catullus.pl                    |     2 +-
 src/include/catalog/heap.h                         |    10 +-
 src/include/catalog/index.h                        |    15 +
 src/include/catalog/namespace.h                    |     3 +-
 src/include/catalog/pablopcatso.pl                 |     2 +-
 src/include/catalog/pg_aggregate.h                 |     6 +
 src/include/catalog/pg_amop.h                      |    27 +
 src/include/catalog/pg_amproc.h                    |    19 +-
 src/include/catalog/pg_authid.h                    |    20 +-
 src/include/catalog/pg_cast.h                      |     3 +
 src/include/catalog/pg_class.h                     |     9 +-
 src/include/catalog/pg_constraint.h                |     4 +
 src/include/catalog/pg_database.h                  |     2 -
 src/include/catalog/pg_exttable.h                  |    60 +-
 src/include/catalog/pg_namespace.h                 |    46 +-
 src/include/catalog/pg_opclass.h                   |     5 +-
 src/include/catalog/pg_operator.h                  |    57 +-
 src/include/catalog/pg_proc.h                      |   219 +-
 src/include/catalog/pg_proc.sql                    |    10 +
 src/include/catalog/pg_type.h                      |    11 +-
 src/include/catalog/sleazy.pl                      |     2 +-
 src/include/catalog/tidycat.pl                     |     2 +-
 src/include/cdb/cdbconn.h                          |     9 +
 src/include/cdb/cdbcopy.h                          |     3 +-
 src/include/cdb/cdbdatabaseinfo.h                  |    33 +
 src/include/cdb/cdbdatalocality.h                  |    79 +-
 src/include/cdb/cdbdirectopen.h                    |     4 +
 src/include/cdb/cdbdisp.h                          |     4 +
 src/include/cdb/cdbdispatchresult.h                |    16 +-
 src/include/cdb/cdbexplain.h                       |     8 +
 src/include/cdb/cdbfilesystemcredential.h          |    17 +
 src/include/cdb/cdbgang.h                          |     1 +
 src/include/cdb/cdbhash.h                          |    10 +
 src/include/cdb/cdbmirroredfilesysobj.h            |     3 +
 src/include/cdb/cdbmotion.h                        |     1 -
 src/include/cdb/cdbparquetstoragewrite.h           |    40 +-
 src/include/cdb/cdbpartition.h                     |     2 +-
 src/include/cdb/cdbquerycontextdispatching.h       |    67 +-
 src/include/cdb/cdbutil.h                          |     1 +
 src/include/cdb/cdbvars.h                          |     9 +-
 src/include/cdb/dispatcher.h                       |    10 +-
 src/include/cdb/dispatcher_mgr.h                   |    32 +
 src/include/cdb/dispatcher_new.h                   |    74 +
 src/include/cdb/executormgr.h                      |    14 +-
 src/include/cdb/executormgr_new.h                  |    94 +
 src/include/cdb/ml_ipc.h                           |     5 +
 src/include/cdb/poolmgr.h                          |    36 +-
 src/include/cdb/scheduler.h                        |    96 +
 src/include/cdb/workermgr.h                        |     2 +
 src/include/commands/copy.h                        |     4 +-
 src/include/commands/defrem.h                      |     4 +
 src/include/commands/explain.h                     |     2 +-
 src/include/commands/tablecmds.h                   |    49 +-
 src/include/commands/user.h                        |     3 +
 src/include/commands/vacuum.h                      |     1 +
 src/include/cwrapper/cached-result.h               |    43 +
 src/include/cwrapper/executor-c.h                  |    53 +
 src/include/cwrapper/func-kind.cg.h                |   941 +
 src/include/cwrapper/hdfs-file-system-c.h          |   117 +
 src/include/cwrapper/hive-file-system-c.h          |    28 +
 src/include/cwrapper/instrument.h                  |    54 +
 src/include/cwrapper/magma-client-c.h              |   141 +
 src/include/cwrapper/magma-format-c.h              |   121 +
 src/include/cwrapper/orc-format-c.h                |   125 +
 src/include/cwrapper/scheduler-c.h                 |    42 +
 src/include/cwrapper/text-format-c.h               |    66 +
 src/include/cwrapper/type-kind.h                   |   108 +
 src/include/cwrapper/univplan-c.h                  |   397 +
 src/include/executor/execDML.h                     |    51 +
 src/include/executor/execHHashagg.h                |    27 +-
 src/include/executor/execdesc.h                    |    14 +
 src/include/executor/executor.h                    |    50 +-
 src/include/executor/nodeAgg.h                     |     9 +-
 src/include/executor/nodeMotion.h                  |     2 -
 src/include/executor/tuptable.h                    |     4 +-
 src/include/fmgr.h                                 |     5 +
 src/include/funcapi.h                              |     8 +
 src/include/libpq/hba.h                            |     4 +-
 src/include/mb/pg_wchar.h                          |     1 +
 src/include/miscadmin.h                            |    13 +
 src/include/nodes/execnodes.h                      |    31 +-
 src/include/nodes/nodes.h                          |    38 +-
 src/include/nodes/parsenodes.h                     |  3154 +-
 src/include/nodes/plannerconfig.h                  |     4 +
 src/include/nodes/plannodes.h                      |    50 +-
 src/include/nodes/relation.h                       |    25 +-
 src/include/optimizer/clauses.h                    |     1 +
 src/include/optimizer/cost.h                       |    13 +-
 src/include/optimizer/newPlanner.h                 |    92 +
 src/include/optimizer/paths.h                      |     4 +
 src/include/optimizer/planmain.h                   |     2 +
 src/include/optimizer/planshare.h                  |    10 +-
 src/include/optimizer/var.h                        |     2 +-
 src/include/parser/analyze.h                       |   102 +-
 src/include/parser/kwlist.h                        |     2 +-
 src/include/parser/parse_func.h                    |     6 +-
 src/include/pg_config.h.in                         |    23 +-
 src/include/pg_stat_activity_history_process.h     |    46 +
 src/include/postmaster/identity.h                  |     8 +-
 src/include/storage/fd.h                           |     4 +
 src/include/storage/itemptr.h                      |     4 +
 src/include/storage/lwlock.h                       |     2 +-
 src/include/storage/s_lock.h                       |     4 +
 src/include/tcop/dest.h                            |     5 +
 src/include/utils/.gitignore                       |     1 +
 src/include/utils/acl.h                            |     1 +
 src/include/utils/array.h                          |     6 +
 src/include/utils/builtins.h                       |     5 +
 src/include/utils/cloudrest.h                      |    88 +
 src/include/utils/guc.h                            |    41 +-
 src/include/utils/guc_tables.h                     |     2 +
 src/include/utils/hawq_type_mapping.h              |    91 +
 src/include/utils/json.h                           |    86 +
 src/include/utils/jsonapi.h                        |   134 +
 src/include/utils/jsonb.h                          |   438 +
 src/include/utils/lsyscache.h                      |     6 +
 src/include/utils/memaccounting.h                  |     3 +
 src/include/utils/memutils.h                       |     3 +-
 src/include/utils/numeric.h                        |     1 +
 src/include/utils/rel.h                            |     8 +-
 src/include/utils/typcache.h                       |     2 +
 src/include/utils/uri.h                            |    14 +-
 src/pl/Makefile                                    |    21 +-
 src/pl/plperl/GNUmakefile                          |     5 +
 src/pl/plpgsql/src/pl_comp.c                       |     3 +-
 src/pl/plpgsql/src/pl_exec.c                       |    63 +-
 src/pl/plpython/plpython.c                         |     6 +
 src/pl/plr.spec                                    |     4 +-
 src/pl/vplr.spec                                   |     3 +-
 src/port/rand.c                                    |     2 +-
 src/test/feature/.gitignore                        |    10 +-
 .../ans/exttable_extorc_datatype_bool.ans.source   |    91 +
 .../ans/exttable_extorc_datatype_char.ans.source   |    50 +
 .../exttable_extorc_datatype_datetime.ans.source   |   188 +
 .../exttable_extorc_datatype_decimal.ans.source    |   270 +
 .../ans/exttable_extorc_datatype_double.ans.source |    42 +
 .../exttable_extorc_datatype_integer.ans.source    |    48 +
 .../ans/exttable_extorc_datatype_money.ans.source  |    27 +
 .../exttable_extorc_datatype_varchar.ans.source    |    42 +
 .../ans/exttable_extorc_encoding.ans.source        |    37 +
 .../ans/exttable_extorc_negativepath.ans.source    |   225 +
 .../ans/exttable_extorc_normalpath.ans             |   103 +
 .../ans/exttable_extorc_normalpath.ans.source      |   103 +
 .../ans/exttable_extorc_same_name_test.ans.source  |    69 +
 .../ans/exttable_extorc_testtruncate.ans.source    |    22 +
 .../sql/exttable_extorc_datatype_bool.sql.source   |    27 +
 .../sql/exttable_extorc_datatype_char.sql.source   |    27 +
 .../exttable_extorc_datatype_datetime.sql.source   |    98 +
 .../exttable_extorc_datatype_decimal.sql.source    |    92 +
 .../sql/exttable_extorc_datatype_double.sql.source |    17 +
 .../exttable_extorc_datatype_integer.sql.source    |    17 +
 .../sql/exttable_extorc_datatype_money.sql.source  |    14 +
 .../exttable_extorc_datatype_varchar.sql.source    |    18 +
 .../sql/exttable_extorc_encoding.sql.source        |    17 +
 .../sql/exttable_extorc_negativepath.sql.source    |    89 +
 .../sql/exttable_extorc_normalpath.sql             |    49 +
 .../sql/exttable_extorc_normalpath.sql.source      |    49 +
 .../sql/exttable_extorc_same_name_test.sql.source  |    33 +
 .../sql/exttable_extorc_testtruncate.sql.source    |     5 +
 src/test/feature/ExternalSource/sql/init_file      |    27 +
 .../feature/ExternalSource/test_extfmt_orc.cpp     |  1272 +
 src/test/feature/UDF/TestUDF.cpp                   |     9 +
 src/test/feature/UDF/ans/test_udf_debug.ans        |   125 +
 src/test/feature/UDF/sql/test_udf_debug.sql        |    72 +
 src/test/feature/catalog/ans/char.ans              |    16 +-
 src/test/feature/catalog/ans/char_linux.ans        |   139 +
 src/test/feature/catalog/ans/varchar.ans           |    16 +-
 src/test/feature/catalog/ans/varchar_linux.ans     |   128 +
 src/test/feature/catalog/test_create_table.cpp     |     4 +
 src/test/feature/catalog/test_type.cpp             |    34 +-
 src/test/feature/cloudtest/sql/normal/all.sql      |   101 +
 .../feature/cloudtest/sql/normal/before_normal.sql |    41 +
 .../cloudtest/sql/parallel/before_parallel.sql     |    99 +
 .../feature/cloudtest/sql/parallel/parallel.sql    |    73 +
 src/test/feature/cloudtest/test_cloud.cpp          |   271 +
 src/test/feature/cloudtest/test_cloud.h            |   143 +
 src/test/feature/cloudtest/test_cloud.xml          |    40 +
 src/test/feature/lib/compent_config.cpp            |   331 +
 src/test/feature/lib/compent_config.h              |   139 +
 src/test/feature/lib/hdfs_config.cpp               |   390 +-
 src/test/feature/lib/hdfs_config.h                 |    28 +-
 src/test/feature/lib/parse_out.cpp                 |   167 +
 src/test/feature/lib/parse_out.h                   |    86 +
 src/test/feature/lib/psql.cpp                      |   203 +-
 src/test/feature/lib/psql.h                        |    45 +-
 src/test/feature/lib/sql_util.cpp                  |   685 +-
 src/test/feature/lib/sql_util.h                    |   303 +-
 src/test/feature/lib/sql_util_parallel.cpp         |   705 +
 src/test/feature/lib/sql_util_parallel.h           |   134 +
 src/test/feature/lib/sqlfile-parsebase.cpp         |    89 +
 src/test/feature/lib/sqlfile-parsebase.h           |    61 +
 src/test/feature/lock/TestLock.cpp                 |    60 -
 src/test/feature/lock/ans/lock.ans                 |   129 -
 src/test/feature/lock/sql/lock.sql                 |    71 -
 src/test/feature/planner/ans/subplan.ans           |    15 +
 src/test/feature/planner/sql/subplan.sql           |     7 +
 src/test/regress/GNUmakefile                       |     2 +-
 src/test/regress/atmsort.pl                        |     4 +-
 src/test/regress/checkinc.py                       |     8 +-
 src/test/regress/data/tenk.data                    |     2 +-
 .../regress/data/upgrade41/catalog40/toolkit.sql   |     4 +-
 src/test/regress/dld.pl                            |     2 +-
 src/test/regress/explain.pl                        |     8 +-
 src/test/regress/get_ereport.pl                    |     2 +-
 src/test/regress/gpdiff.pl                         |     2 +-
 src/test/regress/gpexclude.pl                      |     2 +-
 src/test/regress/gpsourcify.pl                     |     2 +-
 src/test/regress/gpstringsubs.pl                   |     2 +-
 src/test/regress/gptorment.pl                      |     2 +-
 src/test/regress/output/hcatalog_lookup.source     |    14 +-
 src/test/regress/upg2_wizard.pl                    |     2 +-
 src/timezone/Makefile                              |     2 +-
 src/timezone/strftime.c                            |    40 +-
 tools/Makefile                                     |     6 +
 tools/bin/Makefile                                 |     6 +-
 tools/bin/autoswitch.sh                            |    73 +
 tools/bin/generate-greenplum-path.sh               |    23 +-
 tools/bin/generate_load_tpch.pl                    |    34 +-
 tools/bin/gpcheck                                  |   828 +-
 tools/bin/gpload.py                                |     4 +-
 tools/bin/gppylib/commands/base.py                 |     9 +-
 tools/bin/gppylib/data/{2.4.json => 3.0.json}      |     0
 tools/bin/gppylib/data/{2.4.json => 3.1.json}      |     0
 tools/bin/gppylib/data/{2.4.json => 3.2.json}      |     0
 tools/bin/gppylib/data/4.0.json                    | 10458 +++++++
 tools/bin/gppylib/gpsqlUtil.py                     |    61 +
 tools/bin/gpscp                                    |     4 +-
 tools/bin/gpsd                                     |     2 +-
 tools/bin/hawq                                     |    28 +-
 tools/bin/hawq_ctl                                 |   236 +-
 tools/bin/hawqconfig                               |    21 +-
 tools/bin/hawqpylib/hawqlib.py                     |   180 +-
 tools/bin/hawqregister                             |     6 +-
 tools/bin/lib/hawqinit.sh                          |    40 +-
 tools/bin/magma                                    |   415 +
 tools/bin/pre_setup.sh                             |    52 +
 tools/bin/upgrade.sh                               |   213 +
 tools/tpcds/.gitignore                             |    16 +
 tools/tpcds/makefile                               |    39 +
 tools/tpcds/parallel_dsdgen.cpp                    |   193 +
 tools/tpcds/tools/Cygwin Tools.rules               |    30 +
 tools/tpcds/tools/HISTORY                          |    36 +
 tools/tpcds/tools/How_To_Guide-DS-V2.0.0.docx      |   Bin 0 -> 29054 bytes
 tools/tpcds/tools/How_To_Guide.doc                 |   Bin 0 -> 68608 bytes
 tools/tpcds/tools/Makefile.suite                   |   692 +
 tools/tpcds/tools/PORTING.NOTES                    |   201 +
 tools/tpcds/tools/QGEN.doc                         |   Bin 0 -> 151552 bytes
 tools/tpcds/tools/QgenMain.c                       |   375 +
 tools/tpcds/tools/README                           |    73 +
 tools/tpcds/tools/README_grammar.txt               |    63 +
 tools/tpcds/tools/ReleaseNotes.txt                 |    49 +
 tools/tpcds/tools/StringBuffer.c                   |   189 +
 tools/tpcds/tools/StringBuffer.h                   |    58 +
 tools/tpcds/tools/address.c                        |   350 +
 tools/tpcds/tools/address.h                        |    79 +
 tools/tpcds/tools/build_support.c                  |   448 +
 tools/tpcds/tools/build_support.h                  |    59 +
 tools/tpcds/tools/calendar.dst                     |   441 +
 tools/tpcds/tools/checksum.c                       |    98 +
 tools/tpcds/tools/checksum.vcproj                  |   170 +
 tools/tpcds/tools/cities.dst                       |  1057 +
 tools/tpcds/tools/column_list.txt                  |  1024 +
 tools/tpcds/tools/config.h                         |   177 +
 tools/tpcds/tools/constants.h                      |   325 +
 tools/tpcds/tools/date.c                           |   647 +
 tools/tpcds/tools/date.h                           |    76 +
 tools/tpcds/tools/dbgen2.sln                       |    70 +
 tools/tpcds/tools/dbgen2.vcproj                    |  3358 ++
 tools/tpcds/tools/dbgen_version.c                  |   156 +
 tools/tpcds/tools/dbgen_version.h                  |    52 +
 tools/tpcds/tools/dcgram.c                         |   657 +
 tools/tpcds/tools/dcgram.h                         |    42 +
 tools/tpcds/tools/dcomp.c                          |   327 +
 tools/tpcds/tools/dcomp.h                          |    87 +
 tools/tpcds/tools/dcomp_params.h                   |    61 +
 tools/tpcds/tools/decimal.c                        |   398 +
 tools/tpcds/tools/decimal.h                        |    70 +
 tools/tpcds/tools/dist.c                           |   973 +
 tools/tpcds/tools/dist.h                           |   105 +
 tools/tpcds/tools/distcomp.vcproj                  |   573 +
 tools/tpcds/tools/driver.c                         |   574 +
 tools/tpcds/tools/driver.h                         |    70 +
 tools/tpcds/tools/english.dst                      |  4790 +++
 tools/tpcds/tools/error_msg.c                      |   241 +
 tools/tpcds/tools/error_msg.h                      |   102 +
 tools/tpcds/tools/eval.c                           |   886 +
 tools/tpcds/tools/eval.h                           |    38 +
 tools/tpcds/tools/expr.c                           |   542 +
 tools/tpcds/tools/expr.h                           |   131 +
 tools/tpcds/tools/fips.dst                         |  3187 ++
 tools/tpcds/tools/genrand.c                        |   728 +
 tools/tpcds/tools/genrand.h                        |    82 +
 tools/tpcds/tools/grammar.c                        |   383 +
 tools/tpcds/tools/grammar.h                        |    54 +
 tools/tpcds/tools/grammar.vcproj                   |   321 +
 tools/tpcds/tools/grammar_support.c                |   217 +
 tools/tpcds/tools/grammar_support.h                |    63 +
 tools/tpcds/tools/items.dst                        |   516 +
 tools/tpcds/tools/join.c                           |   462 +
 tools/tpcds/tools/keywords.c                       |   217 +
 tools/tpcds/tools/keywords.h                       |    52 +
 tools/tpcds/tools/list.c                           |   329 +
 tools/tpcds/tools/list.h                           |    74 +
 tools/tpcds/tools/load.c                           |   116 +
 tools/tpcds/tools/load.h                           |    42 +
 tools/tpcds/tools/makefile                         |   700 +
 tools/tpcds/tools/mathops.h                        |    54 +
 tools/tpcds/tools/misc.c                           |   124 +
 tools/tpcds/tools/misc.h                           |    48 +
 tools/tpcds/tools/mkheader.c                       |   177 +
 tools/tpcds/tools/mkheader.vcproj                  |   243 +
 tools/tpcds/tools/names.dst                        | 10220 ++++++
 tools/tpcds/tools/nulls.c                          |   112 +
 tools/tpcds/tools/nulls.h                          |    38 +
 tools/tpcds/tools/parallel.c                       |   229 +
 tools/tpcds/tools/parallel.h                       |    41 +
 tools/tpcds/tools/parallel.sh                      |   101 +
 tools/tpcds/tools/params.h                         |    78 +
 tools/tpcds/tools/permute.c                        |   132 +
 tools/tpcds/tools/permute.h                        |    39 +
 tools/tpcds/tools/porting.c                        |    57 +
 tools/tpcds/tools/porting.h                        |   153 +
 tools/tpcds/tools/pricing.c                        |   278 +
 tools/tpcds/tools/pricing.h                        |    77 +
 tools/tpcds/tools/print.c                          |   690 +
 tools/tpcds/tools/print.h                          |    58 +
 tools/tpcds/tools/qgen.y                           |   572 +
 tools/tpcds/tools/qgen2.vcproj                     |  1092 +
 tools/tpcds/tools/qgen_params.h                    |    83 +
 tools/tpcds/tools/query_handler.c                  |   315 +
 tools/tpcds/tools/query_handler.h                  |    39 +
 tools/tpcds/tools/r_params.c                       |   953 +
 tools/tpcds/tools/r_params.h                       |    83 +
 tools/tpcds/tools/release.c                        |    68 +
 tools/tpcds/tools/release.h                        |    51 +
 tools/tpcds/tools/s_brand.c                        |   151 +
 tools/tpcds/tools/s_brand.h                        |    53 +
 tools/tpcds/tools/s_call_center.c                  |   151 +
 tools/tpcds/tools/s_call_center.h                  |    42 +
 tools/tpcds/tools/s_catalog.c                      |   156 +
 tools/tpcds/tools/s_catalog.h                      |    61 +
 tools/tpcds/tools/s_catalog_order.c                |   231 +
 tools/tpcds/tools/s_catalog_order.h                |    57 +
 tools/tpcds/tools/s_catalog_order_lineitem.c       |   197 +
 tools/tpcds/tools/s_catalog_order_lineitem.h       |    60 +
 tools/tpcds/tools/s_catalog_page.c                 |   131 +
 tools/tpcds/tools/s_catalog_page.h                 |    42 +
 tools/tpcds/tools/s_catalog_promotional_item.c     |   146 +
 tools/tpcds/tools/s_catalog_promotional_item.h     |    55 +
 tools/tpcds/tools/s_catalog_returns.c              |   183 +
 tools/tpcds/tools/s_catalog_returns.h              |    61 +
 tools/tpcds/tools/s_category.c                     |   146 +
 tools/tpcds/tools/s_category.h                     |    55 +
 tools/tpcds/tools/s_class.c                        |   149 +
 tools/tpcds/tools/s_class.h                        |    54 +
 tools/tpcds/tools/s_company.c                      |   145 +
 tools/tpcds/tools/s_company.h                      |    52 +
 tools/tpcds/tools/s_customer.c                     |   280 +
 tools/tpcds/tools/s_customer.h                     |    77 +
 tools/tpcds/tools/s_customer_address.c             |   129 +
 tools/tpcds/tools/s_customer_address.h             |    43 +
 tools/tpcds/tools/s_division.c                     |   147 +
 tools/tpcds/tools/s_division.h                     |    53 +
 tools/tpcds/tools/s_inventory.c                    |   167 +
 tools/tpcds/tools/s_inventory.h                    |    56 +
 tools/tpcds/tools/s_item.c                         |   178 +
 tools/tpcds/tools/s_item.h                         |    43 +
 tools/tpcds/tools/s_manager.c                      |   149 +
 tools/tpcds/tools/s_manager.h                      |    49 +
 tools/tpcds/tools/s_manufacturer.c                 |   145 +
 tools/tpcds/tools/s_manufacturer.h                 |    49 +
 tools/tpcds/tools/s_market.c                       |   150 +
 tools/tpcds/tools/s_market.h                       |    52 +
 tools/tpcds/tools/s_pline.c                        |   186 +
 tools/tpcds/tools/s_pline.h                        |    59 +
 tools/tpcds/tools/s_product.c                      |   150 +
 tools/tpcds/tools/s_product.h                      |    51 +
 tools/tpcds/tools/s_promotion.c                    |   191 +
 tools/tpcds/tools/s_promotion.h                    |    43 +
 tools/tpcds/tools/s_purchase.c                     |   225 +
 tools/tpcds/tools/s_purchase.h                     |    56 +
 tools/tpcds/tools/s_reason.c                       |   146 +
 tools/tpcds/tools/s_reason.h                       |    49 +
 tools/tpcds/tools/s_store.c                        |   155 +
 tools/tpcds/tools/s_store.h                        |    41 +
 tools/tpcds/tools/s_store_promotional_item.c       |   147 +
 tools/tpcds/tools/s_store_promotional_item.h       |    48 +
 tools/tpcds/tools/s_store_returns.c                |   176 +
 tools/tpcds/tools/s_store_returns.h                |    51 +
 tools/tpcds/tools/s_subcategory.c                  |   150 +
 tools/tpcds/tools/s_subcategory.h                  |    52 +
 tools/tpcds/tools/s_subclass.c                     |   150 +
 tools/tpcds/tools/s_subclass.h                     |    52 +
 tools/tpcds/tools/s_tdefs.h                        |    75 +
 tools/tpcds/tools/s_warehouse.c                    |   145 +
 tools/tpcds/tools/s_warehouse.h                    |    42 +
 tools/tpcds/tools/s_web_order.c                    |   237 +
 tools/tpcds/tools/s_web_order.h                    |    56 +
 tools/tpcds/tools/s_web_order_lineitem.c           |   228 +
 tools/tpcds/tools/s_web_order_lineitem.h           |    59 +
 tools/tpcds/tools/s_web_page.c                     |   174 +
 tools/tpcds/tools/s_web_page.h                     |    42 +
 tools/tpcds/tools/s_web_promotinal_item.c          |   149 +
 tools/tpcds/tools/s_web_promotional_item.h         |    49 +
 tools/tpcds/tools/s_web_returns.c                  |   188 +
 tools/tpcds/tools/s_web_returns.h                  |    57 +
 tools/tpcds/tools/s_web_site.c                     |   151 +
 tools/tpcds/tools/s_web_site.h                     |    41 +
 tools/tpcds/tools/s_zip_to_gmt.c                   |   256 +
 tools/tpcds/tools/s_zip_to_gmt.h                   |    48 +
 tools/tpcds/tools/scaling.c                        |   797 +
 tools/tpcds/tools/scaling.dst                      |   138 +
 tools/tpcds/tools/scaling.h                        |    49 +
 tools/tpcds/tools/scd.c                            |   422 +
 tools/tpcds/tools/scd.h                            |    58 +
 tools/tpcds/tools/source_schema.wam                | 24537 +++++++++++++++
 tools/tpcds/tools/sparse.c                         |   112 +
 tools/tpcds/tools/sparse.h                         |    37 +
 tools/tpcds/tools/streets.dst                      |   165 +
 tools/tpcds/tools/substitution.c                   |   188 +
 tools/tpcds/tools/substitution.h                   |    96 +
 tools/tpcds/tools/tdef_functions.c                 |   180 +
 tools/tpcds/tools/tdef_functions.h                 |    66 +
 tools/tpcds/tools/tdefs.c                          |   230 +
 tools/tpcds/tools/tdefs.h                          |   103 +
 tools/tpcds/tools/template.h                       |    72 +
 tools/tpcds/tools/text.c                           |   228 +
 tools/tpcds/tools/tokenizer.l                      |   278 +
 tools/tpcds/tools/tpcds.dst                        |   838 +
 tools/tpcds/tools/tpcds.sql                        |   588 +
 tools/tpcds/tools/tpcds.wam                        | 31365 +++++++++++++++++++
 tools/tpcds/tools/tpcds_20080910.sum               |    48 +
 tools/tpcds/tools/tpcds_ri.sql                     |   139 +
 tools/tpcds/tools/tpcds_source.sql                 |   429 +
 tools/tpcds/tools/validate.c                       |   207 +
 tools/tpcds/tools/validate.h                       |    45 +
 tools/tpcds/tools/w_call_center.c                  |   307 +
 tools/tpcds/tools/w_call_center.h                  |    80 +
 tools/tpcds/tools/w_catalog_page.c                 |   195 +
 tools/tpcds/tools/w_catalog_page.h                 |    57 +
 tools/tpcds/tools/w_catalog_returns.c              |   252 +
 tools/tpcds/tools/w_catalog_returns.h              |    74 +
 tools/tpcds/tools/w_catalog_sales.c                |   403 +
 tools/tpcds/tools/w_catalog_sales.h                |    71 +
 tools/tpcds/tools/w_customer.c                     |   217 +
 tools/tpcds/tools/w_customer.h                     |    68 +
 tools/tpcds/tools/w_customer_address.c             |   157 +
 tools/tpcds/tools/w_customer_address.h             |    55 +
 tools/tpcds/tools/w_customer_demographics.c        |   156 +
 tools/tpcds/tools/w_customer_demographics.h        |    67 +
 tools/tpcds/tools/w_datetbl.c                      |   323 +
 tools/tpcds/tools/w_datetbl.h                      |    80 +
 tools/tpcds/tools/w_household_demographics.c       |   153 +
 tools/tpcds/tools/w_household_demographics.h       |    53 +
 tools/tpcds/tools/w_income_band.c                  |   139 +
 tools/tpcds/tools/w_income_band.h                  |    48 +
 tools/tpcds/tools/w_inventory.c                    |   213 +
 tools/tpcds/tools/w_inventory.h                    |    51 +
 tools/tpcds/tools/w_item.c                         |   303 +
 tools/tpcds/tools/w_item.h                         |    79 +
 tools/tpcds/tools/w_promotion.c                    |   215 +
 tools/tpcds/tools/w_promotion.h                    |    68 +
 tools/tpcds/tools/w_reason.c                       |   141 +
 tools/tpcds/tools/w_reason.h                       |    52 +
 tools/tpcds/tools/w_ship_mode.c                    |   159 +
 tools/tpcds/tools/w_ship_mode.h                    |    56 +
 tools/tpcds/tools/w_store.c                        |   310 +
 tools/tpcds/tools/w_store.h                        |    92 +
 tools/tpcds/tools/w_store_returns.c                |   205 +
 tools/tpcds/tools/w_store_returns.h                |    64 +
 tools/tpcds/tools/w_store_sales.c                  |   297 +
 tools/tpcds/tools/w_store_sales.h                  |    65 +
 tools/tpcds/tools/w_tdefs.h                        |    66 +
 tools/tpcds/tools/w_timetbl.c                      |   156 +
 tools/tpcds/tools/w_timetbl.h                      |    58 +
 tools/tpcds/tools/w_warehouse.c                    |   166 +
 tools/tpcds/tools/w_warehouse.h                    |    57 +
 tools/tpcds/tools/w_web_page.c                     |   250 +
 tools/tpcds/tools/w_web_page.h                     |    60 +
 tools/tpcds/tools/w_web_returns.c                  |   226 +
 tools/tpcds/tools/w_web_returns.h                  |    63 +
 tools/tpcds/tools/w_web_sales.c                    |   360 +
 tools/tpcds/tools/w_web_sales.h                    |    79 +
 tools/tpcds/tools/w_web_site.c                     |   275 +
 tools/tpcds/tools/w_web_site.h                     |    72 +
 tools/tpch/.gitignore                              |    10 +
 tools/tpch/BUGS                                    |   993 +
 tools/tpch/HISTORY                                 |   535 +
 tools/tpch/Makefile                                |   183 +
 tools/tpch/PORTING.NOTES                           |   220 +
 tools/tpch/README                                  |   436 +
 tools/tpch/bcd2.c                                  |   264 +
 tools/tpch/bcd2.h                                  |    31 +
 tools/tpch/bm_utils.c                              |   558 +
 tools/tpch/build.c                                 |   466 +
 tools/tpch/cdbhash.c                               |    13 +
 tools/tpch/cdbhash.h                               |    16 +
 tools/tpch/config.h                                |   222 +
 tools/tpch/dbgen.vcproj                            |   469 +
 tools/tpch/dists.dss                               |   839 +
 tools/tpch/driver.c                                |   846 +
 tools/tpch/dss.ddl                                 |    70 +
 tools/tpch/dss.h                                   |   585 +
 tools/tpch/dss.ri                                  |   100 +
 tools/tpch/dsstypes.h                              |   186 +
 tools/tpch/load_stub.c                             |   221 +
 tools/tpch/makefile.suite                          |   182 +
 tools/tpch/permute.c                               |   205 +
 tools/tpch/permute.h                               |    67 +
 tools/tpch/print.c                                 |   730 +
 tools/tpch/qgen.c                                  |   494 +
 tools/tpch/qgen.vcproj                             |   269 +
 tools/tpch/release.h                               |     7 +
 tools/tpch/rnd.c                                   |   241 +
 tools/tpch/rnd.h                                   |   111 +
 tools/tpch/rng64.c                                 |   137 +
 tools/tpch/rng64.h                                 |    26 +
 tools/tpch/shared.h                                |    72 +
 tools/tpch/speed_seed.c                            |   260 +
 tools/tpch/text.c                                  |   388 +
 tools/tpch/tpcd.h                                  |   141 +
 tools/tpch/tpch.sln                                |    54 +
 tools/tpch/tpchdriver.c                            |  2609 ++
 tools/tpch/tpchdriver.vcproj                       |   413 +
 tools/tpch/update_release.sh                       |    23 +
 tools/tpch/varsub.c                                |   370 +
 tools/tpch/vsub.c                                  |   347 +
 1449 files changed, 335950 insertions(+), 21557 deletions(-)
 create mode 100644 .github/workflows/build.yml
 create mode 100644 .github/workflows/scripts/download/.gitignore
 create mode 100644 .github/workflows/scripts/gtest_filter_negative
 create mode 100755 .github/workflows/scripts/init_hawq.sh
 create mode 100755 .github/workflows/scripts/init_hdfs.sh
 create mode 100755 .github/workflows/scripts/init_linux.sh
 create mode 100755 .github/workflows/scripts/init_macos.sh
 create mode 100644 .github/workflows/scripts/toolchain.sh
 create mode 100644 CMakeLists.txt
 create mode 100755 commit-msg
 create mode 100644 contrib/extfmtcsv/Makefile
 create mode 100644 contrib/extfmtcsv/extfmtcsv.c
 create mode 100644 contrib/exthdfs/Makefile
 create mode 100644 contrib/exthdfs/common.h
 create mode 100644 contrib/exthdfs/exthdfs.c
 create mode 100644 contrib/exthive/Makefile
 create mode 100644 contrib/exthive/common.h
 create mode 100644 contrib/exthive/exthive.c
 create mode 100755 contrib/hawq-docker/centos7-docker/hawq-test/service-hawq.sh
 create mode 100755 contrib/hawq-docker/centos7-docker/hawq-test/service-pxf.sh
 create mode 100644 contrib/magma/Makefile
 create mode 100644 contrib/magma/magma.c
 create mode 100644 contrib/magma/magma_install.sql
 create mode 100644 contrib/magma/monitor_install.sql
 create mode 100644 contrib/orc/Makefile
 create mode 100644 contrib/orc/README
 create mode 100644 contrib/orc/hive_install.sql
 create mode 100644 contrib/orc/orc.c
 create mode 100644 contrib/orc/orc_init.sql
 create mode 100644 contrib/orc/orc_install.sql
 create mode 100644 contrib/orc/orc_uninstall.sql
 create mode 100644 contrib/oushu/load_orc_debug_udf.sql
 create mode 100755 contrib/oushu/orc_debug_metadata.py
 create mode 100755 contrib/oushu/orc_debug_statistics.py
 create mode 100755 coverage-report.sh
 create mode 100644 depends/dbcommon/.gitignore
 create mode 100644 depends/dbcommon/CMake/CMakeTestCompileInt64tType.cc
 create mode 100644 depends/dbcommon/CMake/FindCogapp.cmake
 create mode 100644 depends/dbcommon/CMake/FindGFlags.cmake
 create mode 100644 depends/dbcommon/CMake/FindGlog.cmake
 create mode 100644 depends/dbcommon/CMake/FindJSON.cmake
 create mode 100644 depends/dbcommon/CMake/FindSnappy.cmake
 copy depends/{libyarn => dbcommon}/CMake/Functions.cmake (100%)
 create mode 100644 depends/dbcommon/CMake/Options.cmake
 create mode 100644 depends/dbcommon/CMake/Platform.cmake
 create mode 100644 depends/dbcommon/CMakeLists.txt
 create mode 100644 depends/dbcommon/Makefile
 create mode 100644 depends/dbcommon/Makefile.global.in
 create mode 100644 depends/dbcommon/README
 create mode 100755 depends/dbcommon/bootstrap
 create mode 100644 depends/dbcommon/src/CMakeLists.txt
 create mode 100644 depends/dbcommon/src/dbcommon/checksum/checksum-util.cc
 create mode 100644 depends/dbcommon/src/dbcommon/checksum/checksum-util.h
 create mode 100644 depends/dbcommon/src/dbcommon/checksum/checksum.h
 create mode 100644 depends/dbcommon/src/dbcommon/checksum/hw-crc32c.cc
 create mode 100644 depends/dbcommon/src/dbcommon/checksum/hw-crc32c.h
 create mode 100644 depends/dbcommon/src/dbcommon/checksum/sw-crc32c.cc
 create mode 100644 depends/dbcommon/src/dbcommon/checksum/sw-crc32c.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/node-deserializer.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/node-deserializer.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/node-serializer.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/tuple-batch-store.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/tuple-batch-store.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/tuple-batch.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/tuple-batch.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/tuple-desc.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/tuple-desc.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector-transformer.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/decimal-vector.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/decimal-vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/fixed-length-vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/interval-vector.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/interval-vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/list-vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/struct-vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/timestamp-vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/variable-length-vector.cc
 create mode 100644 depends/dbcommon/src/dbcommon/common/vector/variable-length-vector.h
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/file-system-manager.cc
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/file-system-manager.h
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/file-system.h
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/hdfs/hdfs-file-system.cc
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/hdfs/hdfs-file-system.h
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/hive/fb303.thrift
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/hive/hive-file-system.cc
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/hive/hive-file-system.h
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/hive/hive_metastore.thrift
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/local/local-file-system.cc
 create mode 100644 depends/dbcommon/src/dbcommon/filesystem/local/local-file-system.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/CPPLINT.cfg
 create mode 100644 depends/dbcommon/src/dbcommon/function/README
 create mode 100644 depends/dbcommon/src/dbcommon/function/agg-func.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/agg-func.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/arith-cmp-func.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/arith-func.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/arithmetic-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/array-function.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/array-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/binary-function.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/cmp-func.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/comparison-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/date-function.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/date-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/decimal-function.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/decimal-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/func-kind.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/func.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/func.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/invoker.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/invoker.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/mathematical-function.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/mathematical-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/string-binary-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/string-function.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/typecast-func.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/typecast-func.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/typecast-function.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/typecast-function.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/typecast-texttonum-func.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/typecast-texttonum-func.h
 create mode 100644 depends/dbcommon/src/dbcommon/function/volatile-func.cc
 create mode 100644 depends/dbcommon/src/dbcommon/function/volatile-func.h
 create mode 100644 depends/dbcommon/src/dbcommon/hash/cdb-hash.h
 create mode 100644 depends/dbcommon/src/dbcommon/hash/fast-hash.h
 create mode 100644 depends/dbcommon/src/dbcommon/hash/hash-keys.cc
 create mode 100644 depends/dbcommon/src/dbcommon/hash/hash-keys.h
 create mode 100644 depends/dbcommon/src/dbcommon/hash/native-hash-table.cc
 create mode 100644 depends/dbcommon/src/dbcommon/hash/native-hash-table.h
 create mode 100644 depends/dbcommon/src/dbcommon/hash/tuple-batch-hasher.h
 create mode 100644 depends/dbcommon/src/dbcommon/log/debug-logger.cc
 create mode 100644 depends/dbcommon/src/dbcommon/log/debug-logger.h
 create mode 100644 depends/dbcommon/src/dbcommon/log/error-code.h
 create mode 100644 depends/dbcommon/src/dbcommon/log/exception.h
 create mode 100644 depends/dbcommon/src/dbcommon/log/logger.cc
 create mode 100644 depends/dbcommon/src/dbcommon/log/logger.h
 create mode 100644 depends/dbcommon/src/dbcommon/log/stack-printer.cc
 create mode 100644 depends/dbcommon/src/dbcommon/log/stack-printer.h
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp-message-client.cc
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp-message-client.h
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp-message-common.cc
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp-message-common.h
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp-message-server.cc
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp-message-server.h
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp-message-serverhandler.h
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp.cc
 create mode 100644 depends/dbcommon/src/dbcommon/network/socket-tcp.h
 create mode 100644 depends/dbcommon/src/dbcommon/nodes/datum.cc
 create mode 100644 depends/dbcommon/src/dbcommon/nodes/datum.h
 create mode 100644 depends/dbcommon/src/dbcommon/nodes/scalar.cc
 create mode 100644 depends/dbcommon/src/dbcommon/nodes/scalar.h
 create mode 100644 depends/dbcommon/src/dbcommon/nodes/select-list.cc
 create mode 100644 depends/dbcommon/src/dbcommon/nodes/select-list.h
 create mode 100644 depends/dbcommon/src/dbcommon/python/code_generator.py
 create mode 100644 depends/dbcommon/src/dbcommon/testutil/agg-func-utils.h
 create mode 100644 depends/dbcommon/src/dbcommon/testutil/function-utils.cc
 create mode 100644 depends/dbcommon/src/dbcommon/testutil/function-utils.h
 create mode 100644 depends/dbcommon/src/dbcommon/testutil/scalar-utils.h
 create mode 100644 depends/dbcommon/src/dbcommon/testutil/tuple-batch-utils.h
 create mode 100644 depends/dbcommon/src/dbcommon/testutil/vector-utils.h
 create mode 100644 depends/dbcommon/src/dbcommon/thread/err-detect-callback.h
 create mode 100644 depends/dbcommon/src/dbcommon/thread/thread-base.cc
 create mode 100644 depends/dbcommon/src/dbcommon/thread/thread-base.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/CPPLINT.cfg
 create mode 100644 depends/dbcommon/src/dbcommon/type/README
 create mode 100644 depends/dbcommon/src/dbcommon/type/array.cc
 create mode 100644 depends/dbcommon/src/dbcommon/type/array.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/bool.cc
 create mode 100644 depends/dbcommon/src/dbcommon/type/bool.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/date.cc
 create mode 100644 depends/dbcommon/src/dbcommon/type/date.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/decimal.cc
 create mode 100644 depends/dbcommon/src/dbcommon/type/decimal.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/float.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/integer.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/interval.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/magma-tid.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/type-kind.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/type-modifier.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/type-util.cc
 create mode 100644 depends/dbcommon/src/dbcommon/type/type-util.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/typebase.cc
 create mode 100644 depends/dbcommon/src/dbcommon/type/typebase.h
 create mode 100644 depends/dbcommon/src/dbcommon/type/varlen.cc
 create mode 100644 depends/dbcommon/src/dbcommon/type/varlen.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/async-queue.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/block-memory-buffer.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/bool-buffer.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/bool-buffer.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/byte-buffer.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/comp/compressor.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/comp/lz4-compressor.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/comp/lz4-compressor.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/comp/snappy-compressor.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/comp/snappy-compressor.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/comp/zlib-compressor.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/comp/zlib-compressor.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/cutils.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/cutils.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/file-info.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/flat-memory-buffer.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/global.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/global.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/instrument.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/instrument.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/int-util.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/join-tuple-buffer.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/join-tuple-buffer.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/lock.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/lock.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/macro.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/mb/mb-converter.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/mb/mb-converter.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/memory-pool.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/memory-pool.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/net-client.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/object-counter.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/object-counter.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/parameters.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/string-util.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/string-util.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/sys-info.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/time-util.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/timezone-util.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/timezone-util.h
 create mode 100644 depends/dbcommon/src/dbcommon/utils/url.cc
 create mode 100644 depends/dbcommon/src/dbcommon/utils/url.h
 create mode 100644 depends/dbcommon/test/CMakeLists.txt
 copy depends/{libyarn => dbcommon}/test/data/checksum1.in (100%)
 copy depends/{libyarn => dbcommon}/test/data/checksum2.in (100%)
 create mode 100755 depends/dbcommon/test/parallel/parallel-launcher.py
 create mode 100644 depends/dbcommon/test/unit/CMakeLists.txt
 create mode 100644 depends/dbcommon/test/unit/checksum/test-checksum.cc
 create mode 100644 depends/dbcommon/test/unit/common/test-async-queue.cc
 create mode 100644 depends/dbcommon/test/unit/common/test-function.cc
 create mode 100644 depends/dbcommon/test/unit/common/test-tuple-batch-copy-control.cc
 create mode 100644 depends/dbcommon/test/unit/common/test-tuple-batch-store.cc
 create mode 100644 depends/dbcommon/test/unit/common/test-tuple-batch.cc
 create mode 100644 depends/dbcommon/test/unit/common/test-vector-copy-control.cc
 create mode 100644 depends/dbcommon/test/unit/common/test-vector.cc
 create mode 100644 depends/dbcommon/test/unit/filesystem/test-file-system.cc
 create mode 100644 depends/dbcommon/test/unit/function/CPPLINT.cfg
 create mode 100644 depends/dbcommon/test/unit/function/test-agg-func-has-no-group-by.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-agg-func-small-scale.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-agg-func.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-array-func.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-binary-cmp-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-binary-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-codegen-arith-functions.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-codegen-cmp-functions.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-codegen-typecast-functions.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-date-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-decimal-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-mathematical-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-string-cmp-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-string-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-timestamp-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-typecast-function.cc
 create mode 100644 depends/dbcommon/test/unit/function/test-typecast-texttonum-func.cc
 create mode 100644 depends/dbcommon/test/unit/log/test-debug-logger.cc
 create mode 100644 depends/dbcommon/test/unit/network/test-socket-tcp-message-comm.cc
 create mode 100644 depends/dbcommon/test/unit/nodes/test-datum.cc
 create mode 100644 depends/dbcommon/test/unit/nodes/test-select-list.cc
 create mode 100644 depends/dbcommon/test/unit/test-hash-table.cc
 create mode 100644 depends/dbcommon/test/unit/thread/test-err-detect-callback.cc
 create mode 100644 depends/dbcommon/test/unit/type/test-type.cc
 create mode 100644 depends/dbcommon/test/unit/unit-test-main.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-bool-buffer.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-byte-buffer.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-cutils.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-flat-memory-buffer.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-int-util.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-join-tuple-buffer.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-lock.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-lz4-compress.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-mb-converter.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-string-util.cc
 create mode 100644 depends/dbcommon/test/unit/utils/test-url.cc
 create mode 100644 depends/storage/.gitignore
 create mode 100644 depends/storage/CMake/CMakeTestCompileInt64tType.cc
 create mode 100644 depends/storage/CMake/FindCogapp.cmake
 create mode 100644 depends/storage/CMake/FindGFlags.cmake
 create mode 100644 depends/storage/CMake/FindGlog.cmake
 create mode 100644 depends/storage/CMake/FindJSON.cmake
 create mode 100644 depends/storage/CMake/FindSnappy.cmake
 create mode 100644 depends/storage/CMake/FindZLIB.cmake
 copy depends/{libyarn => storage}/CMake/Functions.cmake (100%)
 create mode 100644 depends/storage/CMake/Options.cmake
 create mode 100644 depends/storage/CMake/Platform.cmake
 create mode 100644 depends/storage/CMakeLists.txt
 create mode 100644 depends/storage/Makefile
 create mode 100644 depends/storage/Makefile.global.in
 create mode 100644 depends/storage/README
 create mode 100755 depends/storage/bootstrap
 create mode 100644 depends/storage/src/CMakeLists.txt
 copy tools/bin/pythonSrc/unittest2-0.5.1/unittest2/test/dummy.py => depends/storage/src/storage/README (100%)
 create mode 100644 depends/storage/src/storage/common/bloom-filter.h
 create mode 100644 depends/storage/src/storage/common/string.h
 create mode 100644 depends/storage/src/storage/cwrapper/hdfs-file-system-c.cc
 create mode 100644 depends/storage/src/storage/cwrapper/hdfs-file-system-c.h
 create mode 100644 depends/storage/src/storage/cwrapper/orc-format-c.cc
 create mode 100644 depends/storage/src/storage/cwrapper/orc-format-c.h
 create mode 100644 depends/storage/src/storage/format/format.cc
 create mode 100644 depends/storage/src/storage/format/format.h
 create mode 100644 depends/storage/src/storage/format/orc/README
 create mode 100644 depends/storage/src/storage/format/orc/byte-rle.cc
 create mode 100644 depends/storage/src/storage/format/orc/byte-rle.h
 create mode 100644 depends/storage/src/storage/format/orc/column-printer.cc
 create mode 100644 depends/storage/src/storage/format/orc/column-printer.h
 create mode 100644 depends/storage/src/storage/format/orc/data-buffer.cc
 create mode 100644 depends/storage/src/storage/format/orc/data-buffer.h
 create mode 100644 depends/storage/src/storage/format/orc/exceptions.cc
 create mode 100644 depends/storage/src/storage/format/orc/exceptions.h
 create mode 100644 depends/storage/src/storage/format/orc/file-version.h
 create mode 100644 depends/storage/src/storage/format/orc/input-stream.cc
 create mode 100644 depends/storage/src/storage/format/orc/input-stream.h
 create mode 100644 depends/storage/src/storage/format/orc/int128.cc
 create mode 100644 depends/storage/src/storage/format/orc/int128.h
 create mode 100644 depends/storage/src/storage/format/orc/lzo-decompressor.cc
 create mode 100644 depends/storage/src/storage/format/orc/lzo-decompressor.h
 create mode 100644 depends/storage/src/storage/format/orc/orc-format-reader.cc
 create mode 100644 depends/storage/src/storage/format/orc/orc-format-reader.h
 create mode 100644 depends/storage/src/storage/format/orc/orc-format-writer.cc
 create mode 100644 depends/storage/src/storage/format/orc/orc-format-writer.h
 create mode 100644 depends/storage/src/storage/format/orc/orc-format.cc
 create mode 100644 depends/storage/src/storage/format/orc/orc-format.h
 create mode 100644 depends/storage/src/storage/format/orc/orc-predicates.cc
 create mode 100644 depends/storage/src/storage/format/orc/orc-predicates.h
 create mode 100644 depends/storage/src/storage/format/orc/orc-proto-definition.cc
 create mode 100644 depends/storage/src/storage/format/orc/orc-proto-definition.h
 create mode 100644 depends/storage/src/storage/format/orc/orc_proto.proto
 create mode 100644 depends/storage/src/storage/format/orc/output-stream.cc
 create mode 100644 depends/storage/src/storage/format/orc/output-stream.h
 create mode 100644 depends/storage/src/storage/format/orc/reader.cc
 create mode 100644 depends/storage/src/storage/format/orc/reader.h
 create mode 100644 depends/storage/src/storage/format/orc/rle-v0.h
 create mode 100644 depends/storage/src/storage/format/orc/rle-v1.h
 create mode 100644 depends/storage/src/storage/format/orc/rle-v2.h
 create mode 100644 depends/storage/src/storage/format/orc/rle.cc
 create mode 100644 depends/storage/src/storage/format/orc/rle.h
 create mode 100644 depends/storage/src/storage/format/orc/seekable-input-stream.cc
 create mode 100644 depends/storage/src/storage/format/orc/seekable-input-stream.h
 create mode 100644 depends/storage/src/storage/format/orc/seekable-output-stream.cc
 create mode 100644 depends/storage/src/storage/format/orc/seekable-output-stream.h
 create mode 100644 depends/storage/src/storage/format/orc/string-dictionary.cc
 create mode 100644 depends/storage/src/storage/format/orc/string-dictionary.h
 create mode 100644 depends/storage/src/storage/format/orc/timezone.cc
 create mode 100644 depends/storage/src/storage/format/orc/timezone.h
 create mode 100644 depends/storage/src/storage/format/orc/type-impl.cc
 create mode 100644 depends/storage/src/storage/format/orc/type-impl.h
 create mode 100644 depends/storage/src/storage/format/orc/type.h
 create mode 100644 depends/storage/src/storage/format/orc/vector.cc
 create mode 100644 depends/storage/src/storage/format/orc/vector.h
 create mode 100644 depends/storage/src/storage/format/orc/writer.cc
 create mode 100644 depends/storage/src/storage/format/orc/writer.h
 create mode 100644 depends/storage/src/storage/format/orc/writer/binary-column-writer.cc
 create mode 100644 depends/storage/src/storage/format/orc/writer/decimal-column-writer.cc
 create mode 100644 depends/storage/src/storage/format/orc/writer/string-column-writer.cc
 create mode 100644 depends/storage/src/storage/testutil/file-utils.h
 create mode 100644 depends/storage/src/storage/testutil/format-util.h
 create mode 100644 depends/storage/test/CMakeLists.txt
 create mode 100644 depends/storage/test/data/hawq-write-orc.sql
 copy tools/bin/pythonSrc/unittest2-0.5.1/unittest2/test/dummy.py => depends/storage/test/data/sampledata (100%)
 create mode 100644 depends/storage/test/data/spark-read-orc.sql
 create mode 100755 depends/storage/test/parallel/parallel-launcher.py
 create mode 100644 depends/storage/test/unit/CMakeLists.txt
 create mode 100644 depends/storage/test/unit/common/test-bloom-filter.cc
 create mode 100644 depends/storage/test/unit/format/test-filter-pushdown.cc
 create mode 100644 depends/storage/test/unit/format/test-orc-byte-rle-encoder.cc
 create mode 100644 depends/storage/test/unit/format/test-orc-byte-rle.cc
 create mode 100644 depends/storage/test/unit/format/test-orc-format.cc
 create mode 100644 depends/storage/test/unit/format/test-orc-int128.cc
 create mode 100644 depends/storage/test/unit/format/test-orc-proto-definition.cc
 create mode 100644 depends/storage/test/unit/format/test-orc-rle.cc
 create mode 100644 depends/storage/test/unit/format/test-orc-vector.cc
 create mode 100644 depends/storage/test/unit/format/test-string-dictionary.cc
 create mode 100644 depends/storage/test/unit/unit-test-main.cc
 create mode 100644 depends/univplan/.gitignore
 create mode 100644 depends/univplan/CMake/CMakeTestCompileInt64tType.cc
 create mode 100644 depends/univplan/CMake/FindCogapp.cmake
 create mode 100644 depends/univplan/CMake/FindGFlags.cmake
 create mode 100644 depends/univplan/CMake/FindGlog.cmake
 create mode 100644 depends/univplan/CMake/FindJSON.cmake
 create mode 100644 depends/univplan/CMake/FindSnappy.cmake
 copy depends/{libyarn => univplan}/CMake/Functions.cmake (100%)
 create mode 100644 depends/univplan/CMake/Options.cmake
 create mode 100644 depends/univplan/CMake/Platform.cmake
 create mode 100644 depends/univplan/CMakeLists.txt
 create mode 100644 depends/univplan/Makefile
 create mode 100644 depends/univplan/Makefile.global.in
 copy tools/bin/pythonSrc/unittest2-0.5.1/unittest2/test/dummy.py => depends/univplan/README (100%)
 create mode 100755 depends/univplan/bootstrap
 create mode 100644 depends/univplan/src/CMakeLists.txt
 create mode 100644 depends/univplan/src/univplan/common/expression.cc
 create mode 100644 depends/univplan/src/univplan/common/expression.h
 create mode 100644 depends/univplan/src/univplan/common/plannode-util.h
 create mode 100644 depends/univplan/src/univplan/common/plannode-walker.h
 create mode 100644 depends/univplan/src/univplan/common/stagize.cc
 create mode 100644 depends/univplan/src/univplan/common/stagize.h
 create mode 100644 depends/univplan/src/univplan/common/statistics.h
 create mode 100644 depends/univplan/src/univplan/common/subplan-util.cc
 create mode 100644 depends/univplan/src/univplan/common/subplan-util.h
 create mode 100644 depends/univplan/src/univplan/common/univplan-type.h
 create mode 100644 depends/univplan/src/univplan/common/var-util.cc
 create mode 100644 depends/univplan/src/univplan/common/var-util.h
 create mode 100644 depends/univplan/src/univplan/cwrapper/univplan-c.cc
 create mode 100644 depends/univplan/src/univplan/cwrapper/univplan-c.h
 create mode 100644 depends/univplan/src/univplan/minmax/minmax-predicates.cc
 create mode 100644 depends/univplan/src/univplan/minmax/minmax-predicates.h
 create mode 100644 depends/univplan/src/univplan/proto/universal-plan-catalog.proto
 create mode 100644 depends/univplan/src/univplan/proto/universal-plan-expr.proto
 create mode 100644 depends/univplan/src/univplan/proto/universal-plan.proto
 create mode 100644 depends/univplan/src/univplan/testutil/univplan-proto-util.cc
 create mode 100644 depends/univplan/src/univplan/testutil/univplan-proto-util.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-agg.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-agg.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-append.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-append.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-column.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-connector.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-expr-node.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-expr-poly.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-expr-tree.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-ext-gs-filter.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-ext-gs-filter.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-ext-gs-proj.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-ext-gs-proj.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-ext-gs-scan.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-ext-gs-scan.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-hash.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-hashjoin.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-insert.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-insert.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-limit.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-limit.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-listener.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-material.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-mergejoin.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-nestloop.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-node.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-node.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-paraminfo.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-plan-node-poly.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-plan.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-plan.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-range-tbl-entry.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-receiver.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-result.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-scan-seq.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-scan-seq.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-scan-task.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-shareinput-scan.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-sink.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-sort.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-sort.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-subquery-scan.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-table.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-target-entry.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-unique.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder-unique.h
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder.cc
 create mode 100644 depends/univplan/src/univplan/univplanbuilder/univplanbuilder.h
 create mode 100644 depends/univplan/test/CMakeLists.txt
 create mode 100644 depends/univplan/test/data/TestAgg
 create mode 100644 depends/univplan/test/data/TestCompletedPlanAfter
 create mode 100644 depends/univplan/test/data/TestCompletedPlanBefore
 create mode 100644 depends/univplan/test/data/TestLimitCount
 create mode 100644 depends/univplan/test/data/TestLimitCountOffset
 create mode 100644 depends/univplan/test/data/TestLimitOffset
 create mode 100644 depends/univplan/test/data/TestNullTest
 create mode 100644 depends/univplan/test/data/TestQualListAndExpr
 create mode 100644 depends/univplan/test/data/TestSort
 create mode 100644 depends/univplan/test/data/TestStagizeAfter
 create mode 100644 depends/univplan/test/data/TestStagizeBefore
 create mode 100644 depends/univplan/test/data/TestUnivPlanProtoGenerate
 create mode 100755 depends/univplan/test/parallel/parallel-launcher.py
 create mode 100644 depends/univplan/test/unit/CMakeLists.txt
 create mode 100644 depends/univplan/test/unit/test-basic-univplan.cc
 create mode 100644 depends/univplan/test/unit/test-minmax-cotasklist.cc
 create mode 100644 depends/univplan/test/unit/test-univplan-cwrapper.cc
 create mode 100644 depends/univplan/test/unit/test-univplan.h
 create mode 100644 depends/univplan/test/unit/unit-test-main.cc
 create mode 100755 pre-push
 create mode 100644 pxf/pxf-jdbc/src/main/java/org/apache/hawq/pxf/plugins/jdbc/JdbcAccessor.java
 create mode 100644 pxf/pxf-jdbc/src/main/java/org/apache/hawq/pxf/plugins/jdbc/JdbcResolver.java
 create mode 100644 pxf/pxf-jdbc/src/main/java/org/apache/hawq/pxf/plugins/jdbc/utils/MicrosoftProduct.java
 create mode 100644 pxf/pxf-jdbc/src/main/java/org/apache/hawq/pxf/plugins/jdbc/writercallable/BatchWriterCallable.java
 create mode 100644 pxf/pxf-jdbc/src/main/java/org/apache/hawq/pxf/plugins/jdbc/writercallable/SimpleWriterCallable.java
 create mode 100644 pxf/pxf-jdbc/src/main/java/org/apache/hawq/pxf/plugins/jdbc/writercallable/WriterCallable.java
 create mode 100644 pxf/pxf-jdbc/src/main/java/org/apache/hawq/pxf/plugins/jdbc/writercallable/WriterCallableFactory.java
 create mode 100644 pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/ServletLifecycleListener.java
 create mode 100644 pxf/pxf-service/src/main/resources/pxf-privateoushu.classpath
 create mode 100755 sanity-test.sh
 create mode 100644 src/backend/access/external/read_cache.c
 create mode 100644 src/backend/access/orc/Makefile
 create mode 100644 src/backend/access/orc/orcam.c
 create mode 100644 src/backend/access/orc/orcsegfiles.c
 create mode 100644 src/backend/cdb/dispatcher_mgr.c
 create mode 100644 src/backend/cdb/dispatcher_new.c
 create mode 100644 src/backend/cdb/executormgr_new.c
 create mode 100644 src/backend/cdb/motion/ic_new.c
 create mode 100644 src/backend/cdb/scheduler.c
 create mode 100644 src/backend/executor/newExecutor.c
 create mode 100644 src/backend/libpq/cloudrest.c
 create mode 100644 src/backend/optimizer/plan/newPlanner.c
 create mode 100644 src/backend/postmaster/pg_stat_activity_history_process.c
 create mode 100755 src/backend/utils/Gen_hawq_funcoid_mapping.sh
 create mode 100644 src/backend/utils/adt/array_distance_install.sql
 create mode 100644 src/backend/utils/adt/array_distance_uninstall.sql
 create mode 100644 src/backend/utils/adt/json.c
 create mode 100644 src/backend/utils/adt/jsonb.c
 create mode 100644 src/backend/utils/adt/jsonb_gin.c
 create mode 100644 src/backend/utils/adt/jsonb_op.c
 create mode 100644 src/backend/utils/adt/jsonb_util.c
 create mode 100644 src/backend/utils/adt/jsonfuncs.c
 create mode 100644 src/backend/utils/hawq_type_mapping.c
 create mode 100644 src/include/access/orcam.h
 create mode 100644 src/include/access/orcsegfiles.h
 create mode 100644 src/include/access/read_cache.h
 create mode 100644 src/include/cdb/dispatcher_mgr.h
 create mode 100644 src/include/cdb/dispatcher_new.h
 create mode 100644 src/include/cdb/executormgr_new.h
 create mode 100644 src/include/cdb/scheduler.h
 create mode 100644 src/include/cwrapper/cached-result.h
 create mode 100644 src/include/cwrapper/executor-c.h
 create mode 100644 src/include/cwrapper/func-kind.cg.h
 create mode 100644 src/include/cwrapper/hdfs-file-system-c.h
 create mode 100644 src/include/cwrapper/hive-file-system-c.h
 create mode 100644 src/include/cwrapper/instrument.h
 create mode 100644 src/include/cwrapper/magma-client-c.h
 create mode 100644 src/include/cwrapper/magma-format-c.h
 create mode 100644 src/include/cwrapper/orc-format-c.h
 create mode 100644 src/include/cwrapper/scheduler-c.h
 create mode 100644 src/include/cwrapper/text-format-c.h
 create mode 100644 src/include/cwrapper/type-kind.h
 create mode 100644 src/include/cwrapper/univplan-c.h
 create mode 100644 src/include/optimizer/newPlanner.h
 create mode 100644 src/include/pg_stat_activity_history_process.h
 create mode 100644 src/include/utils/cloudrest.h
 create mode 100644 src/include/utils/hawq_type_mapping.h
 create mode 100644 src/include/utils/json.h
 create mode 100644 src/include/utils/jsonapi.h
 create mode 100644 src/include/utils/jsonb.h
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_bool.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_char.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_datetime.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_decimal.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_double.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_integer.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_money.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_datatype_varchar.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_encoding.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_negativepath.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_normalpath.ans
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_normalpath.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_same_name_test.ans.source
 create mode 100644 src/test/feature/ExternalSource/ans/exttable_extorc_testtruncate.ans.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_bool.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_char.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_datetime.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_decimal.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_double.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_integer.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_money.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_datatype_varchar.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_encoding.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_negativepath.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_normalpath.sql
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_normalpath.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_same_name_test.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/exttable_extorc_testtruncate.sql.source
 create mode 100644 src/test/feature/ExternalSource/sql/init_file
 create mode 100644 src/test/feature/ExternalSource/test_extfmt_orc.cpp
 create mode 100644 src/test/feature/UDF/ans/test_udf_debug.ans
 create mode 100644 src/test/feature/UDF/sql/test_udf_debug.sql
 mode change 100755 => 100644 src/test/feature/catalog/ans/char.ans
 create mode 100644 src/test/feature/catalog/ans/char_linux.ans
 mode change 100755 => 100644 src/test/feature/catalog/ans/varchar.ans
 create mode 100644 src/test/feature/catalog/ans/varchar_linux.ans
 create mode 100644 src/test/feature/cloudtest/sql/normal/all.sql
 create mode 100644 src/test/feature/cloudtest/sql/normal/before_normal.sql
 create mode 100644 src/test/feature/cloudtest/sql/parallel/before_parallel.sql
 create mode 100644 src/test/feature/cloudtest/sql/parallel/parallel.sql
 create mode 100644 src/test/feature/cloudtest/test_cloud.cpp
 create mode 100644 src/test/feature/cloudtest/test_cloud.h
 create mode 100644 src/test/feature/cloudtest/test_cloud.xml
 create mode 100644 src/test/feature/lib/compent_config.cpp
 create mode 100644 src/test/feature/lib/compent_config.h
 create mode 100644 src/test/feature/lib/parse_out.cpp
 create mode 100644 src/test/feature/lib/parse_out.h
 create mode 100644 src/test/feature/lib/sql_util_parallel.cpp
 create mode 100644 src/test/feature/lib/sql_util_parallel.h
 create mode 100644 src/test/feature/lib/sqlfile-parsebase.cpp
 create mode 100644 src/test/feature/lib/sqlfile-parsebase.h
 delete mode 100644 src/test/feature/lock/TestLock.cpp
 delete mode 100644 src/test/feature/lock/ans/lock.ans
 delete mode 100644 src/test/feature/lock/sql/lock.sql
 create mode 100755 tools/bin/autoswitch.sh
 copy tools/bin/gppylib/data/{2.4.json => 3.0.json} (100%)
 copy tools/bin/gppylib/data/{2.4.json => 3.1.json} (100%)
 copy tools/bin/gppylib/data/{2.4.json => 3.2.json} (100%)
 create mode 100644 tools/bin/gppylib/data/4.0.json
 create mode 100644 tools/bin/gppylib/gpsqlUtil.py
 create mode 100755 tools/bin/magma
 create mode 100644 tools/bin/pre_setup.sh
 create mode 100644 tools/bin/upgrade.sh
 create mode 100644 tools/tpcds/.gitignore
 create mode 100644 tools/tpcds/makefile
 create mode 100644 tools/tpcds/parallel_dsdgen.cpp
 create mode 100644 tools/tpcds/tools/Cygwin Tools.rules
 create mode 100644 tools/tpcds/tools/HISTORY
 create mode 100644 tools/tpcds/tools/How_To_Guide-DS-V2.0.0.docx
 create mode 100644 tools/tpcds/tools/How_To_Guide.doc
 create mode 100644 tools/tpcds/tools/Makefile.suite
 create mode 100644 tools/tpcds/tools/PORTING.NOTES
 create mode 100644 tools/tpcds/tools/QGEN.doc
 create mode 100644 tools/tpcds/tools/QgenMain.c
 create mode 100644 tools/tpcds/tools/README
 create mode 100644 tools/tpcds/tools/README_grammar.txt
 create mode 100644 tools/tpcds/tools/ReleaseNotes.txt
 create mode 100644 tools/tpcds/tools/StringBuffer.c
 create mode 100644 tools/tpcds/tools/StringBuffer.h
 create mode 100644 tools/tpcds/tools/address.c
 create mode 100644 tools/tpcds/tools/address.h
 create mode 100644 tools/tpcds/tools/build_support.c
 create mode 100644 tools/tpcds/tools/build_support.h
 create mode 100644 tools/tpcds/tools/calendar.dst
 create mode 100644 tools/tpcds/tools/checksum.c
 create mode 100644 tools/tpcds/tools/checksum.vcproj
 create mode 100644 tools/tpcds/tools/cities.dst
 create mode 100644 tools/tpcds/tools/column_list.txt
 create mode 100644 tools/tpcds/tools/config.h
 create mode 100644 tools/tpcds/tools/constants.h
 create mode 100644 tools/tpcds/tools/date.c
 create mode 100644 tools/tpcds/tools/date.h
 create mode 100644 tools/tpcds/tools/dbgen2.sln
 create mode 100644 tools/tpcds/tools/dbgen2.vcproj
 create mode 100644 tools/tpcds/tools/dbgen_version.c
 create mode 100644 tools/tpcds/tools/dbgen_version.h
 create mode 100644 tools/tpcds/tools/dcgram.c
 create mode 100644 tools/tpcds/tools/dcgram.h
 create mode 100644 tools/tpcds/tools/dcomp.c
 create mode 100644 tools/tpcds/tools/dcomp.h
 create mode 100644 tools/tpcds/tools/dcomp_params.h
 create mode 100644 tools/tpcds/tools/decimal.c
 create mode 100644 tools/tpcds/tools/decimal.h
 create mode 100644 tools/tpcds/tools/dist.c
 create mode 100644 tools/tpcds/tools/dist.h
 create mode 100644 tools/tpcds/tools/distcomp.vcproj
 create mode 100644 tools/tpcds/tools/driver.c
 create mode 100644 tools/tpcds/tools/driver.h
 create mode 100644 tools/tpcds/tools/english.dst
 create mode 100644 tools/tpcds/tools/error_msg.c
 create mode 100644 tools/tpcds/tools/error_msg.h
 create mode 100644 tools/tpcds/tools/eval.c
 create mode 100644 tools/tpcds/tools/eval.h
 create mode 100644 tools/tpcds/tools/expr.c
 create mode 100644 tools/tpcds/tools/expr.h
 create mode 100644 tools/tpcds/tools/fips.dst
 create mode 100644 tools/tpcds/tools/genrand.c
 create mode 100644 tools/tpcds/tools/genrand.h
 create mode 100644 tools/tpcds/tools/grammar.c
 create mode 100644 tools/tpcds/tools/grammar.h
 create mode 100644 tools/tpcds/tools/grammar.vcproj
 create mode 100644 tools/tpcds/tools/grammar_support.c
 create mode 100644 tools/tpcds/tools/grammar_support.h
 create mode 100644 tools/tpcds/tools/items.dst
 create mode 100644 tools/tpcds/tools/join.c
 create mode 100644 tools/tpcds/tools/keywords.c
 create mode 100644 tools/tpcds/tools/keywords.h
 create mode 100644 tools/tpcds/tools/list.c
 create mode 100644 tools/tpcds/tools/list.h
 create mode 100644 tools/tpcds/tools/load.c
 create mode 100644 tools/tpcds/tools/load.h
 create mode 100644 tools/tpcds/tools/makefile
 create mode 100644 tools/tpcds/tools/mathops.h
 create mode 100644 tools/tpcds/tools/misc.c
 create mode 100644 tools/tpcds/tools/misc.h
 create mode 100644 tools/tpcds/tools/mkheader.c
 create mode 100644 tools/tpcds/tools/mkheader.vcproj
 create mode 100644 tools/tpcds/tools/names.dst
 create mode 100644 tools/tpcds/tools/nulls.c
 create mode 100644 tools/tpcds/tools/nulls.h
 create mode 100644 tools/tpcds/tools/parallel.c
 create mode 100644 tools/tpcds/tools/parallel.h
 create mode 100644 tools/tpcds/tools/parallel.sh
 create mode 100644 tools/tpcds/tools/params.h
 create mode 100644 tools/tpcds/tools/permute.c
 create mode 100644 tools/tpcds/tools/permute.h
 create mode 100644 tools/tpcds/tools/porting.c
 create mode 100644 tools/tpcds/tools/porting.h
 create mode 100644 tools/tpcds/tools/pricing.c
 create mode 100644 tools/tpcds/tools/pricing.h
 create mode 100644 tools/tpcds/tools/print.c
 create mode 100644 tools/tpcds/tools/print.h
 create mode 100644 tools/tpcds/tools/qgen.y
 create mode 100644 tools/tpcds/tools/qgen2.vcproj
 create mode 100644 tools/tpcds/tools/qgen_params.h
 create mode 100644 tools/tpcds/tools/query_handler.c
 create mode 100644 tools/tpcds/tools/query_handler.h
 create mode 100644 tools/tpcds/tools/r_params.c
 create mode 100644 tools/tpcds/tools/r_params.h
 create mode 100644 tools/tpcds/tools/release.c
 create mode 100644 tools/tpcds/tools/release.h
 create mode 100644 tools/tpcds/tools/s_brand.c
 create mode 100644 tools/tpcds/tools/s_brand.h
 create mode 100644 tools/tpcds/tools/s_call_center.c
 create mode 100644 tools/tpcds/tools/s_call_center.h
 create mode 100644 tools/tpcds/tools/s_catalog.c
 create mode 100644 tools/tpcds/tools/s_catalog.h
 create mode 100644 tools/tpcds/tools/s_catalog_order.c
 create mode 100644 tools/tpcds/tools/s_catalog_order.h
 create mode 100644 tools/tpcds/tools/s_catalog_order_lineitem.c
 create mode 100644 tools/tpcds/tools/s_catalog_order_lineitem.h
 create mode 100644 tools/tpcds/tools/s_catalog_page.c
 create mode 100644 tools/tpcds/tools/s_catalog_page.h
 create mode 100644 tools/tpcds/tools/s_catalog_promotional_item.c
 create mode 100644 tools/tpcds/tools/s_catalog_promotional_item.h
 create mode 100644 tools/tpcds/tools/s_catalog_returns.c
 create mode 100644 tools/tpcds/tools/s_catalog_returns.h
 create mode 100644 tools/tpcds/tools/s_category.c
 create mode 100644 tools/tpcds/tools/s_category.h
 create mode 100644 tools/tpcds/tools/s_class.c
 create mode 100644 tools/tpcds/tools/s_class.h
 create mode 100644 tools/tpcds/tools/s_company.c
 create mode 100644 tools/tpcds/tools/s_company.h
 create mode 100644 tools/tpcds/tools/s_customer.c
 create mode 100644 tools/tpcds/tools/s_customer.h
 create mode 100644 tools/tpcds/tools/s_customer_address.c
 create mode 100644 tools/tpcds/tools/s_customer_address.h
 create mode 100644 tools/tpcds/tools/s_division.c
 create mode 100644 tools/tpcds/tools/s_division.h
 create mode 100644 tools/tpcds/tools/s_inventory.c
 create mode 100644 tools/tpcds/tools/s_inventory.h
 create mode 100644 tools/tpcds/tools/s_item.c
 create mode 100644 tools/tpcds/tools/s_item.h
 create mode 100644 tools/tpcds/tools/s_manager.c
 create mode 100644 tools/tpcds/tools/s_manager.h
 create mode 100644 tools/tpcds/tools/s_manufacturer.c
 create mode 100644 tools/tpcds/tools/s_manufacturer.h
 create mode 100644 tools/tpcds/tools/s_market.c
 create mode 100644 tools/tpcds/tools/s_market.h
 create mode 100644 tools/tpcds/tools/s_pline.c
 create mode 100644 tools/tpcds/tools/s_pline.h
 create mode 100644 tools/tpcds/tools/s_product.c
 create mode 100644 tools/tpcds/tools/s_product.h
 create mode 100644 tools/tpcds/tools/s_promotion.c
 create mode 100644 tools/tpcds/tools/s_promotion.h
 create mode 100644 tools/tpcds/tools/s_purchase.c
 create mode 100644 tools/tpcds/tools/s_purchase.h
 create mode 100644 tools/tpcds/tools/s_reason.c
 create mode 100644 tools/tpcds/tools/s_reason.h
 create mode 100644 tools/tpcds/tools/s_store.c
 create mode 100644 tools/tpcds/tools/s_store.h
 create mode 100644 tools/tpcds/tools/s_store_promotional_item.c
 create mode 100644 tools/tpcds/tools/s_store_promotional_item.h
 create mode 100644 tools/tpcds/tools/s_store_returns.c
 create mode 100644 tools/tpcds/tools/s_store_returns.h
 create mode 100644 tools/tpcds/tools/s_subcategory.c
 create mode 100644 tools/tpcds/tools/s_subcategory.h
 create mode 100644 tools/tpcds/tools/s_subclass.c
 create mode 100644 tools/tpcds/tools/s_subclass.h
 create mode 100644 tools/tpcds/tools/s_tdefs.h
 create mode 100644 tools/tpcds/tools/s_warehouse.c
 create mode 100644 tools/tpcds/tools/s_warehouse.h
 create mode 100644 tools/tpcds/tools/s_web_order.c
 create mode 100644 tools/tpcds/tools/s_web_order.h
 create mode 100644 tools/tpcds/tools/s_web_order_lineitem.c
 create mode 100644 tools/tpcds/tools/s_web_order_lineitem.h
 create mode 100644 tools/tpcds/tools/s_web_page.c
 create mode 100644 tools/tpcds/tools/s_web_page.h
 create mode 100644 tools/tpcds/tools/s_web_promotinal_item.c
 create mode 100644 tools/tpcds/tools/s_web_promotional_item.h
 create mode 100644 tools/tpcds/tools/s_web_returns.c
 create mode 100644 tools/tpcds/tools/s_web_returns.h
 create mode 100644 tools/tpcds/tools/s_web_site.c
 create mode 100644 tools/tpcds/tools/s_web_site.h
 create mode 100644 tools/tpcds/tools/s_zip_to_gmt.c
 create mode 100644 tools/tpcds/tools/s_zip_to_gmt.h
 create mode 100644 tools/tpcds/tools/scaling.c
 create mode 100644 tools/tpcds/tools/scaling.dst
 create mode 100644 tools/tpcds/tools/scaling.h
 create mode 100644 tools/tpcds/tools/scd.c
 create mode 100644 tools/tpcds/tools/scd.h
 create mode 100644 tools/tpcds/tools/source_schema.wam
 create mode 100644 tools/tpcds/tools/sparse.c
 create mode 100644 tools/tpcds/tools/sparse.h
 create mode 100644 tools/tpcds/tools/streets.dst
 create mode 100644 tools/tpcds/tools/substitution.c
 create mode 100644 tools/tpcds/tools/substitution.h
 create mode 100644 tools/tpcds/tools/tdef_functions.c
 create mode 100644 tools/tpcds/tools/tdef_functions.h
 create mode 100644 tools/tpcds/tools/tdefs.c
 create mode 100644 tools/tpcds/tools/tdefs.h
 create mode 100644 tools/tpcds/tools/template.h
 create mode 100644 tools/tpcds/tools/text.c
 create mode 100644 tools/tpcds/tools/tokenizer.l
 create mode 100644 tools/tpcds/tools/tpcds.dst
 create mode 100644 tools/tpcds/tools/tpcds.sql
 create mode 100644 tools/tpcds/tools/tpcds.wam
 create mode 100644 tools/tpcds/tools/tpcds_20080910.sum
 create mode 100644 tools/tpcds/tools/tpcds_ri.sql
 create mode 100644 tools/tpcds/tools/tpcds_source.sql
 create mode 100644 tools/tpcds/tools/validate.c
 create mode 100644 tools/tpcds/tools/validate.h
 create mode 100644 tools/tpcds/tools/w_call_center.c
 create mode 100644 tools/tpcds/tools/w_call_center.h
 create mode 100644 tools/tpcds/tools/w_catalog_page.c
 create mode 100644 tools/tpcds/tools/w_catalog_page.h
 create mode 100644 tools/tpcds/tools/w_catalog_returns.c
 create mode 100644 tools/tpcds/tools/w_catalog_returns.h
 create mode 100644 tools/tpcds/tools/w_catalog_sales.c
 create mode 100644 tools/tpcds/tools/w_catalog_sales.h
 create mode 100644 tools/tpcds/tools/w_customer.c
 create mode 100644 tools/tpcds/tools/w_customer.h
 create mode 100644 tools/tpcds/tools/w_customer_address.c
 create mode 100644 tools/tpcds/tools/w_customer_address.h
 create mode 100644 tools/tpcds/tools/w_customer_demographics.c
 create mode 100644 tools/tpcds/tools/w_customer_demographics.h
 create mode 100644 tools/tpcds/tools/w_datetbl.c
 create mode 100644 tools/tpcds/tools/w_datetbl.h
 create mode 100644 tools/tpcds/tools/w_household_demographics.c
 create mode 100644 tools/tpcds/tools/w_household_demographics.h
 create mode 100644 tools/tpcds/tools/w_income_band.c
 create mode 100644 tools/tpcds/tools/w_income_band.h
 create mode 100644 tools/tpcds/tools/w_inventory.c
 create mode 100644 tools/tpcds/tools/w_inventory.h
 create mode 100644 tools/tpcds/tools/w_item.c
 create mode 100644 tools/tpcds/tools/w_item.h
 create mode 100644 tools/tpcds/tools/w_promotion.c
 create mode 100644 tools/tpcds/tools/w_promotion.h
 create mode 100644 tools/tpcds/tools/w_reason.c
 create mode 100644 tools/tpcds/tools/w_reason.h
 create mode 100644 tools/tpcds/tools/w_ship_mode.c
 create mode 100644 tools/tpcds/tools/w_ship_mode.h
 create mode 100644 tools/tpcds/tools/w_store.c
 create mode 100644 tools/tpcds/tools/w_store.h
 create mode 100644 tools/tpcds/tools/w_store_returns.c
 create mode 100644 tools/tpcds/tools/w_store_returns.h
 create mode 100644 tools/tpcds/tools/w_store_sales.c
 create mode 100644 tools/tpcds/tools/w_store_sales.h
 create mode 100644 tools/tpcds/tools/w_tdefs.h
 create mode 100644 tools/tpcds/tools/w_timetbl.c
 create mode 100644 tools/tpcds/tools/w_timetbl.h
 create mode 100644 tools/tpcds/tools/w_warehouse.c
 create mode 100644 tools/tpcds/tools/w_warehouse.h
 create mode 100644 tools/tpcds/tools/w_web_page.c
 create mode 100644 tools/tpcds/tools/w_web_page.h
 create mode 100644 tools/tpcds/tools/w_web_returns.c
 create mode 100644 tools/tpcds/tools/w_web_returns.h
 create mode 100644 tools/tpcds/tools/w_web_sales.c
 create mode 100644 tools/tpcds/tools/w_web_sales.h
 create mode 100644 tools/tpcds/tools/w_web_site.c
 create mode 100644 tools/tpcds/tools/w_web_site.h
 create mode 100755 tools/tpch/.gitignore
 create mode 100755 tools/tpch/BUGS
 create mode 100755 tools/tpch/HISTORY
 create mode 100755 tools/tpch/Makefile
 create mode 100755 tools/tpch/PORTING.NOTES
 create mode 100755 tools/tpch/README
 create mode 100755 tools/tpch/bcd2.c
 create mode 100755 tools/tpch/bcd2.h
 create mode 100755 tools/tpch/bm_utils.c
 create mode 100755 tools/tpch/build.c
 create mode 100755 tools/tpch/cdbhash.c
 create mode 100755 tools/tpch/cdbhash.h
 create mode 100755 tools/tpch/config.h
 create mode 100755 tools/tpch/dbgen.vcproj
 create mode 100755 tools/tpch/dists.dss
 create mode 100755 tools/tpch/driver.c
 create mode 100755 tools/tpch/dss.ddl
 create mode 100755 tools/tpch/dss.h
 create mode 100755 tools/tpch/dss.ri
 create mode 100755 tools/tpch/dsstypes.h
 create mode 100755 tools/tpch/load_stub.c
 create mode 100755 tools/tpch/makefile.suite
 create mode 100755 tools/tpch/permute.c
 create mode 100755 tools/tpch/permute.h
 create mode 100755 tools/tpch/print.c
 create mode 100755 tools/tpch/qgen.c
 create mode 100755 tools/tpch/qgen.vcproj
 create mode 100755 tools/tpch/release.h
 create mode 100755 tools/tpch/rnd.c
 create mode 100755 tools/tpch/rnd.h
 create mode 100755 tools/tpch/rng64.c
 create mode 100755 tools/tpch/rng64.h
 create mode 100755 tools/tpch/shared.h
 create mode 100755 tools/tpch/speed_seed.c
 create mode 100755 tools/tpch/text.c
 create mode 100755 tools/tpch/tpcd.h
 create mode 100755 tools/tpch/tpch.sln
 create mode 100755 tools/tpch/tpchdriver.c
 create mode 100755 tools/tpch/tpchdriver.vcproj
 create mode 100755 tools/tpch/update_release.sh
 create mode 100755 tools/tpch/varsub.c
 create mode 100755 tools/tpch/vsub.c

[hawq] 01/01: HAWQ-1799. Init HAWQ 3.0.0.0 repo

Posted by zt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ztao1987 pushed a commit to branch taoz
in repository https://gitbox.apache.org/repos/asf/hawq.git

commit a89e10a8eb3516e3488034f6aff410924b47d695
Author: ztao1987 <zh...@gmail.com>
AuthorDate: Fri Jul 30 11:40:05 2021 +0800

    HAWQ-1799. Init HAWQ 3.0.0.0 repo
---
 CMakeLists.txt                                     |   164 +
 GNUmakefile.in                                     |    77 +-
 LICENSE                                            |     4 +-
 NOTICE                                             |     2 +-
 README.md                                          |   301 +-
 commit-msg                                         |    25 +
 config/programs.m4                                 |     8 +-
 config/tomcat.m4                                   |     2 +
 configure                                          |   484 +-
 configure.in                                       |    75 +-
 contrib/Makefile                                   |     4 +-
 contrib/extfmtcsv/Makefile                         |    15 +
 contrib/extfmtcsv/extfmtcsv.c                      |   676 +
 contrib/exthdfs/Makefile                           |    29 +-
 contrib/exthdfs/common.h                           |    21 +-
 contrib/exthdfs/exthdfs.c                          |   400 +-
 contrib/exthive/Makefile                           |    13 +
 contrib/exthive/common.h                           |    18 +
 contrib/exthive/exthive.c                          |   493 +
 contrib/extprotocol/gpextprotocol.c                |     2 +-
 contrib/formatter_fixedwidth/fixedwidth.c          |     4 +-
 contrib/hawq-ambari-plugin/README.md               |     4 +-
 contrib/hawq-ambari-plugin/build.properties        |     4 +-
 contrib/hawq-ambari-plugin/pom.xml                 |     6 +-
 .../src/main/resources/utils/add-hawq.py           |     4 +-
 contrib/hawq-docker/Makefile                       |   253 +-
 contrib/hawq-docker/README.md                      |    57 +-
 .../hawq-docker/centos6-docker/hawq-dev/Dockerfile |     1 -
 .../hawq-docker/centos7-docker/hawq-dev/Dockerfile |    29 +-
 .../centos7-docker/hawq-test/Dockerfile            |    10 -
 .../centos7-docker/hawq-test/conf/core-site.xml    |     2 +-
 .../centos7-docker/hawq-test/entrypoint.sh         |     3 -
 contrib/hawq-hadoop/Makefile                       |     4 -
 contrib/hawq-package/README                        |     6 +-
 contrib/hawq-package/build_hawq_rpm.sh             |     2 +-
 contrib/hawq-package/hawq.spec                     |    10 +-
 contrib/hawq-package/make_rpm_tarball.sh           |     6 +-
 contrib/magma/Makefile                             |    17 +
 contrib/magma/magma.c                              |  3885 +++
 contrib/magma/magma_install.sql                    |   217 +
 contrib/magma/monitor_install.sql                  |    77 +
 contrib/orc/Makefile                               |    22 +-
 contrib/orc/orc.c                                  |  3273 +-
 contrib/orc/orc_init.sql                           |     1 -
 contrib/oushu/load_orc_debug_udf.sql               |   180 +
 contrib/oushu/orc_debug_metadata.py                |    10 +
 contrib/oushu/orc_debug_statistics.py              |    73 +
 contrib/pgcrypto/px-crypt.c                        |     2 +-
 coverage-report.sh                                 |   298 +
 depends/libhdfs3/CMake/FindGoogleTest.cmake        |    12 +-
 depends/libhdfs3/CMakeLists.txt                    |     5 +-
 depends/libhdfs3/Makefile                          |     2 +-
 depends/libhdfs3/bootstrap                         |     4 +-
 depends/libhdfs3/src/CMakeLists.txt                |     6 -
 depends/libhdfs3/src/client/DirectoryIterator.cpp  |    32 +-
 depends/libhdfs3/src/client/DirectoryIterator.h    |     1 -
 depends/libhdfs3/src/client/FileEncryptionInfo.h   |     2 +-
 depends/libhdfs3/src/client/Hdfs.cpp               |    29 +-
 depends/libhdfs3/src/client/InputStreamImpl.cpp    |    41 +-
 depends/libhdfs3/src/client/InputStreamImpl.h      |    26 -
 depends/libhdfs3/src/client/OutputStreamImpl.cpp   |    63 +-
 depends/libhdfs3/src/client/OutputStreamImpl.h     |    26 -
 depends/libhdfs3/src/client/Permission.cpp         |     5 +-
 depends/libhdfs3/src/client/UserInfo.h             |     4 -
 depends/libhdfs3/src/client/hdfs.h                 |    34 +-
 depends/libhdfs3/src/common/SessionConfig.cpp      |    14 +-
 depends/libhdfs3/src/common/SessionConfig.h        |    34 -
 depends/libhdfs3/src/rpc/RpcChannel.cpp            |    13 +-
 depends/libhdfs3/src/rpc/RpcConfig.h               |    13 +-
 depends/libhdfs3/src/server/Namenode.h             |     2 -
 depends/libhdfs3/test/data/function-test.xml       |    15 -
 depends/libhdfs3/test/function/CMakeLists.txt      |     4 -
 depends/libhdfs3/test/function/TestCInterface.cpp  |   776 +-
 .../libhdfs3/test/function/TestOutputStream.cpp    |     2 +-
 depends/libhdfs3/test/unit/CMakeLists.txt          |     4 -
 .../libhdfs3/test/unit/UnitTestOutputStream.cpp    |    65 +-
 depends/libyarn/CMake/FindGoogleTest.cmake         |    12 +-
 depends/libyarn/CMakeLists.txt                     |     3 +-
 depends/libyarn/Makefile                           |     2 +-
 depends/libyarn/bootstrap                          |     4 +-
 .../src/libyarnclient/ApplicationClient.cpp        |     5 +-
 .../src/libyarnclient/ApplicationMaster.cpp        |     5 +-
 dist/hawq/LICENSE                                  |     4 +-
 dist/hawq/NOTICE                                   |     2 +-
 doc/src/sgml/ref/alter_database.sgml               |    50 -
 doc/src/sgml/ref/alter_role.sgml                   |     2 +-
 doc/src/sgml/ref/alter_schema.sgml                 |    15 -
 doc/src/sgml/ref/alter_sequence.sgml               |    16 -
 doc/src/sgml/ref/alter_type.sgml                   |    27 +-
 doc/src/sgml/ref/create_external_table.sgml        |    12 +-
 doc/src/sgml/ref/create_role.sgml                  |     2 +-
 doc/src/sgml/ref/create_table.sgml                 |   103 +-
 doc/src/sgml/ref/fetch.sgml                        |    53 +-
 doc/src/sgml/ref/grant.sgml                        |    32 +-
 doc/src/sgml/ref/revoke.sgml                       |    18 +-
 getversion                                         |    10 +-
 pom.xml                                            |    13 +-
 pre-push                                           |    47 +
 pxf/Makefile                                       |    25 +-
 pxf/README.md                                      |    21 -
 pxf/build.gradle                                   |   208 +-
 pxf/gradle.properties                              |     9 +-
 pxf/gradle/wrapper/gradle-wrapper.properties       |     8 +-
 pxf/gradlew                                        |    90 +-
 .../java/org/apache/hawq/pxf/api/OneField.java     |     5 -
 .../main/java/org/apache/hawq/pxf/api/OneRow.java  |     9 -
 .../apache/hawq/pxf/api/utilities/InputData.java   |    69 +-
 .../hawq/pxf/api/utilities/ProfilesConf.java       |     2 +-
 .../apache/hawq/pxf/api/utilities/Utilities.java   |    99 -
 .../pxf/api/utilities/ColumnDescriptorTest.java    |     3 +-
 .../hawq/pxf/api/utilities/ProfilesConfTest.java   |    12 +-
 .../hawq/pxf/api/utilities/UtilitiesTest.java      |   138 -
 .../pxf/plugins/hdfs/HdfsAtomicDataAccessor.java   |     2 +-
 .../plugins/hdfs/HdfsSplittableDataAccessor.java   |     2 +-
 .../pxf/plugins/hdfs/utilities/HdfsUtilities.java  |    69 +-
 .../plugins/hdfs/utilities/HdfsUtilitiesTest.java  |    21 -
 .../hawq/pxf/plugins/hive/HiveDataFragmenter.java  |    58 +-
 .../plugins/hive/HiveInputFormatFragmenter.java    |     2 +-
 .../pxf/plugins/hive/HiveLineBreakAccessor.java    |     2 +-
 .../hawq/pxf/plugins/hive/HiveMetadataFetcher.java |     2 +-
 .../hawq/pxf/plugins/hive/HiveORCAccessor.java     |    77 +-
 .../pxf/plugins/hive/utilities/HiveUtilities.java  |    27 +-
 .../pxf/plugins/hive/utilities/ProfileFactory.java |    19 +-
 .../pxf/plugins/hive/HiveDataFragmenterTest.java   |   218 -
 .../hawq/pxf/plugins/hive/HiveORCAccessorTest.java |    15 -
 .../plugins/hive/utilities/HiveUtilitiesTest.java  |     3 +-
 pxf/pxf-jdbc/README.md                             |   343 +-
 .../hawq/pxf/plugins/jdbc/JdbcFilterBuilder.java   |    75 +-
 .../pxf/plugins/jdbc/JdbcPartitionFragmenter.java  |   391 +-
 .../apache/hawq/pxf/plugins/jdbc/JdbcPlugin.java   |   228 +-
 .../hawq/pxf/plugins/jdbc/JdbcReadAccessor.java    |   122 +
 .../hawq/pxf/plugins/jdbc/JdbcReadResolver.java    |   103 +
 .../hawq/pxf/plugins/jdbc/WhereSQLBuilder.java     |   162 +-
 .../hawq/pxf/plugins/jdbc/utils/ByteUtil.java      |    38 +-
 .../hawq/pxf/plugins/jdbc/utils/DbProduct.java     |    45 +-
 .../hawq/pxf/plugins/jdbc/utils/MysqlProduct.java  |    10 +-
 .../hawq/pxf/plugins/jdbc/utils/OracleProduct.java |    11 +-
 .../pxf/plugins/jdbc/utils/PostgresProduct.java    |    11 +-
 .../plugins/jdbc/JdbcPartitionFragmenterTest.java  |   189 +-
 .../hawq/pxf/plugins/jdbc/SqlBuilderTest.java      |    54 +-
 .../apache/hawq/pxf/plugins/json/JsonAccessor.java |     2 +-
 .../org/apache/hawq/pxf/plugins/json/PxfUnit.java  |    16 +-
 .../parser/PartitionedJsonParserNoSeekTest.java    |    11 +-
 pxf/pxf-service/src/configs/pxf-site.xml           |    39 +
 pxf/pxf-service/src/configs/tomcat/bin/setenv.sh   |    19 +-
 .../hawq/pxf/service/BridgeOutputBuilder.java      |    13 -
 .../pxf/service/FragmentsResponseFormatter.java    |     4 +-
 .../org/apache/hawq/pxf/service/ReadBridge.java    |     2 +-
 .../hawq/pxf/service/rest/BridgeResource.java      |    48 +-
 .../hawq/pxf/service/rest/FragmenterResource.java  |     1 +
 .../hawq/pxf/service/rest/MetadataResource.java    |     7 +-
 .../pxf/service/rest/ServletLifecycleListener.java |    63 +
 .../hawq/pxf/service/rest/VersionResource.java     |     2 +-
 .../hawq/pxf/service/rest/WritableResource.java    |    34 +-
 .../pxf/service/utilities/CustomWebappLoader.java  |     2 +-
 .../hawq/pxf/service/utilities/ProtocolData.java   |    89 +-
 .../hawq/pxf/service/utilities/SecureLogin.java    |    58 +-
 .../hawq/pxf/service/utilities/SecuredHDFS.java    |    17 +-
 .../src/main/resources/pxf-log4j.properties        |     6 +-
 .../src/main/resources/pxf-private.classpath       |    35 +-
 .../src/main/resources/pxf-privatebigtop.classpath |     6 +-
 .../src/main/resources/pxf-privatehdp.classpath    |     7 +-
 .../src/main/resources/pxf-privateoushu.classpath  |    52 +
 .../src/main/resources/pxf-privatephd.classpath    |     8 +-
 .../src/main/resources/pxf-profiles-default.xml    |    61 +-
 pxf/pxf-service/src/main/webapp/WEB-INF/web.xml    |    10 +-
 pxf/pxf-service/src/scripts/pxf-env.sh             |    43 +-
 pxf/pxf-service/src/scripts/pxf-service            |   369 +-
 .../hawq/pxf/service/BridgeOutputBuilderTest.java  |    15 +-
 .../pxf/service/utilities/ProtocolDataTest.java    |   100 +-
 .../pxf/service/utilities/SecuredHDFSTest.java     |    31 +-
 pxf/settings.gradle                                |     3 +-
 pxf/tomcat/src/scripts/pre-install.sh              |     2 +-
 ranger-plugin/admin-plugin/pom.xml                 |     2 +-
 ranger-plugin/conf/rps.properties                  |     5 +-
 ranger-plugin/integration/admin/pom.xml            |     2 +-
 ranger-plugin/integration/pom.xml                  |     2 +-
 ranger-plugin/integration/service/pom.xml          |     2 +-
 ranger-plugin/pom.xml                              |     2 +-
 ranger-plugin/service/pom.xml                      |     2 +-
 sanity-test.sh                                     |   219 +
 src/Makefile                                       |    41 +-
 src/Makefile.global.in                             |    46 +-
 src/Makefile.mock                                  |     6 +-
 src/backend/Makefile                               |    35 +-
 src/backend/access/Makefile                        |     2 +-
 src/backend/access/appendonly/aosegfiles.c         |    13 +-
 src/backend/access/appendonly/appendonlyam.c       |     9 +-
 src/backend/access/appendonly/appendonlywriter.c   |   198 +-
 src/backend/access/bitmap/bitmapattutil.c          |     3 +-
 src/backend/access/common/printtup.c               |    66 +-
 src/backend/access/common/reloptions.c             |   126 +-
 src/backend/access/common/tupdesc.c                |    57 +-
 src/backend/access/external/Makefile               |     4 +-
 src/backend/access/external/fileam.c               |   520 +-
 src/backend/access/external/plugstorage.c          |   333 +-
 src/backend/access/external/pxffilters.c           |   222 +-
 src/backend/access/external/pxfheaders.c           |     8 +-
 src/backend/access/external/pxfmasterapi.c         |     6 +-
 src/backend/access/external/pxfuriparser.c         |     6 +-
 src/backend/access/external/pxfutils.c             |     2 -
 src/backend/access/external/read_cache.c           |   240 +
 src/backend/access/external/test/pxffilters_test.c |   132 +-
 src/backend/access/external/test/pxfheaders_test.c |    35 -
 .../access/external/test/pxfuriparser_test.c       |     4 +-
 src/backend/access/external/url.c                  |  1256 +-
 src/backend/access/external/url_curl.c             |  1349 +
 src/backend/access/heap/heapam.c                   |    14 +-
 src/backend/access/index/catquery.c                |     1 -
 src/backend/access/index/gperf.init                |     1 -
 src/backend/access/orc/Makefile                    |    13 +
 src/backend/access/orc/orcam.c                     |   859 +
 src/backend/access/orc/orcsegfiles.c               |   382 +
 src/backend/access/parquet/parquetam.c             |     4 +-
 src/backend/access/transam/xact.c                  |   243 +
 src/backend/bootstrap/bootparse.y                  |     3 +-
 src/backend/catalog/.gitignore                     |     2 +-
 src/backend/catalog/Makefile                       |    10 +-
 src/backend/catalog/aclchk.c                       |    12 +-
 src/backend/catalog/aoseg.c                        |    24 +-
 src/backend/catalog/catalog.c                      |    14 +-
 src/backend/catalog/cdb_external_extensions.sql    |    34 +
 src/backend/catalog/dependency.c                   |     4 +-
 src/backend/catalog/external/externalmd.c          |     4 +-
 src/backend/catalog/gp_toolkit.sql.in              |    37 +-
 src/backend/catalog/gp_toolkit_test.sql.in         |     1 +
 src/backend/catalog/heap.c                         |   226 +-
 src/backend/catalog/index.c                        |   279 +-
 src/backend/catalog/information_schema.sql         |    27 +
 src/backend/catalog/namespace.c                    |   211 +-
 src/backend/catalog/pg_aggregate.c                 |     5 +-
 src/backend/catalog/pg_attribute_encoding.c        |     2 +-
 src/backend/catalog/pg_compression.c               |     4 +-
 src/backend/catalog/pg_constraint.c                |    73 +
 src/backend/catalog/pg_extprotocol.c               |     5 +-
 src/backend/catalog/pg_exttable.c                  |   251 +-
 src/backend/catalog/pg_namespace.c                 |    98 +
 src/backend/catalog/system_views.sql               |    11 +
 src/backend/catalog/toasting.c                     |     5 +-
 src/backend/cdb/Makefile                           |     9 +-
 src/backend/cdb/cdbcat.c                           |    27 +-
 src/backend/cdb/cdbconn.c                          |   131 +
 src/backend/cdb/cdbcopy.c                          |    56 +-
 src/backend/cdb/cdbdatabaseinfo.c                  |   263 +-
 src/backend/cdb/cdbdatalocality.c                  |  2815 +-
 src/backend/cdb/cdbdirectopen.c                    |    33 +
 src/backend/cdb/cdbdispatchedtablespaceinfo.c      |     3 +-
 src/backend/cdb/cdbdispatchresult.c                |   222 +-
 src/backend/cdb/cdbexplain.c                       |   344 +-
 src/backend/cdb/cdbfilesplit.c                     |    65 +-
 src/backend/cdb/cdbfilesystemcredential.c          |    31 +-
 src/backend/cdb/cdbgang.c                          |     1 +
 src/backend/cdb/cdbgroup.c                         |    20 +-
 src/backend/cdb/cdbhash.c                          |    76 +
 src/backend/cdb/cdbllize.c                         |    11 +-
 src/backend/cdb/cdbmirroredappendonly.c            |    15 +
 src/backend/cdb/cdbmirroredfilesysobj.c            |    49 +-
 src/backend/cdb/cdbmutate.c                        |   174 +-
 src/backend/cdb/cdbpartition.c                     |    86 +-
 src/backend/cdb/cdbpath.c                          |     2 +
 src/backend/cdb/cdbpathlocus.c                     |     8 +-
 src/backend/cdb/cdbpersistentbuild.c               |    20 +-
 src/backend/cdb/cdbpersistentrecovery.c            |    24 +-
 src/backend/cdb/cdbpersistenttablespace.c          |     1 -
 src/backend/cdb/cdbplan.c                          |    34 +-
 src/backend/cdb/cdbquerycontextdispatching.c       |   550 +-
 src/backend/cdb/cdbsharedstorageop.c               |    26 +-
 src/backend/cdb/cdbsrlz.c                          |   103 +-
 src/backend/cdb/cdbtargeteddispatch.c              |    17 +-
 src/backend/cdb/cdbutil.c                          |    13 +
 src/backend/cdb/cdbvars.c                          |    13 +-
 src/backend/cdb/dispatcher.c                       |   178 +-
 src/backend/cdb/dispatcher_mgr.c                   |   306 +
 src/backend/cdb/dispatcher_mgt.c                   |    91 +-
 src/backend/cdb/dispatcher_new.c                   |  1506 +
 src/backend/cdb/executormgr.c                      |   126 +-
 src/backend/cdb/executormgr_new.c                  |   688 +
 src/backend/cdb/motion/Makefile                    |     2 +-
 src/backend/cdb/motion/cdbmotion.c                 |     3 +-
 src/backend/cdb/motion/ic_common.c                 |     6 +-
 src/backend/cdb/motion/ic_new.c                    |    90 +
 src/backend/cdb/motion/ic_udp.c                    |    48 +-
 src/backend/cdb/poolmgr.c                          |    84 +-
 src/backend/cdb/scheduler.c                        |   688 +
 src/backend/cdb/workermgr.c                        |    16 +
 src/backend/commands/alter.c                       |    14 +-
 src/backend/commands/analyze.c                     |   844 +-
 src/backend/commands/cluster.c                     |    19 +-
 src/backend/commands/conversioncmds.c              |     6 +-
 src/backend/commands/copy.c                        |   708 +-
 src/backend/commands/dbcommands.c                  |   106 +-
 src/backend/commands/explain.c                     |   109 +-
 src/backend/commands/extprotocolcmds.c             |     3 +-
 src/backend/commands/filespace.c                   |    19 +-
 src/backend/commands/filesystemcmds.c              |     3 +-
 src/backend/commands/foreigncmds.c                 |    31 +-
 src/backend/commands/functioncmds.c                |    22 +
 src/backend/commands/indexcmds.c                   |   108 +-
 src/backend/commands/portalcmds.c                  |     3 +
 src/backend/commands/prepare.c                     |     3 +
 src/backend/commands/schemacmds.c                  |     7 +
 src/backend/commands/sequence.c                    |     2 +-
 src/backend/commands/tablecmds.c                   |  1600 +-
 src/backend/commands/tablespace.c                  |    39 +-
 src/backend/commands/trigger.c                     |    15 +-
 src/backend/commands/typecmds.c                    |     8 +-
 src/backend/commands/user.c                        |   161 +-
 src/backend/commands/vacuum.c                      |     9 +-
 src/backend/commands/vacuumlazy.c                  |    25 +-
 src/backend/commands/view.c                        |     4 +-
 src/backend/executor/Makefile                      |     3 +-
 src/backend/executor/execAmi.c                     |     6 +-
 src/backend/executor/execDML.c                     |   573 +-
 src/backend/executor/execHHashagg.c                |   145 +-
 src/backend/executor/execMain.c                    |   934 +-
 src/backend/executor/execProcnode.c                |    63 +-
 src/backend/executor/execQual.c                    |    22 +-
 src/backend/executor/execScan.c                    |    21 +-
 src/backend/executor/execTuples.c                  |    10 +-
 src/backend/executor/execUtils.c                   |   170 +-
 src/backend/executor/functions.c                   |    36 +-
 src/backend/executor/newExecutor.c                 |   475 +
 src/backend/executor/nodeAgg.c                     |    47 +-
 src/backend/executor/nodeDML.c                     |     6 +-
 src/backend/executor/nodeDynamicTableScan.c        |     2 +-
 src/backend/executor/nodeExternalscan.c            |    91 +-
 src/backend/executor/nodeMotion.c                  |    16 +-
 src/backend/executor/nodeResult.c                  |     8 +-
 src/backend/executor/nodeRowTrigger.c              |     3 +-
 src/backend/executor/nodeSubplan.c                 |   171 +-
 src/backend/executor/spi.c                         |   121 +-
 src/backend/gp_libpq_fe/fe-connect.c               |    35 +-
 src/backend/gp_libpq_fe/fe-exec.c                  |   230 +
 src/backend/gp_libpq_fe/fe-protocol3.c             |    27 +-
 src/backend/gp_libpq_fe/gp-libpq-fe.h              |    46 +
 src/backend/gp_libpq_fe/gp-libpq-int.h             |     4 +
 src/backend/gpopt/gpdbwrappers.cpp                 |     2 +-
 .../gpopt/translate/CTranslatorDXLToPlStmt.cpp     |    12 +-
 .../gpopt/translate/CTranslatorRelcacheToDXL.cpp   |     1 +
 src/backend/libpq/auth.c                           |     2 +-
 src/backend/libpq/cloudrest.c                      |    49 +-
 src/backend/libpq/pqcomm.c                         |    16 +-
 src/backend/nodes/copyfuncs.c                      |    89 +-
 src/backend/nodes/equalfuncs.c                     |     3 +
 src/backend/nodes/outfast.c                        |    76 +-
 src/backend/nodes/outfuncs.c                       |    64 +-
 src/backend/nodes/print.c                          |     4 +
 src/backend/nodes/readfast.c                       |    79 +-
 src/backend/nodes/readfuncs.c                      |     5 +-
 src/backend/optimizer/path/allpaths.c              |   276 +-
 src/backend/optimizer/path/indxpath.c              |   145 +-
 src/backend/optimizer/plan/Makefile                |     5 +-
 src/backend/optimizer/plan/createplan.c            |   338 +-
 src/backend/optimizer/plan/initsplan.c             |    83 +-
 src/backend/optimizer/plan/newPlanner.c            |  2028 ++
 src/backend/optimizer/plan/planmain.c              |     4 +
 src/backend/optimizer/plan/planner.c               |   481 +-
 src/backend/optimizer/plan/planpartition.c         |     2 +
 src/backend/optimizer/plan/planshare.c             |    56 +-
 src/backend/optimizer/plan/planwindow.c            |     9 +-
 src/backend/optimizer/plan/setrefs.c               |    46 +-
 src/backend/optimizer/plan/subselect.c             |    10 +-
 src/backend/optimizer/prep/preptlist.c             |    19 +-
 src/backend/optimizer/prep/prepunion.c             |    27 +-
 src/backend/optimizer/util/clauses.c               |    48 +-
 src/backend/optimizer/util/pathnode.c              |    16 +-
 src/backend/optimizer/util/plancat.c               |    78 +
 src/backend/optimizer/util/relnode.c               |     2 +-
 src/backend/optimizer/util/var.c                   |    56 +
 src/backend/optimizer/util/walkers.c               |     8 +
 src/backend/parser/analyze.c                       | 19940 ++++++------
 src/backend/parser/gram.y                          |   411 +-
 src/backend/parser/parse_clause.c                  |    40 +-
 src/backend/parser/parse_coerce.c                  |    13 +-
 src/backend/parser/parse_expr.c                    |    25 +-
 src/backend/parser/parse_func.c                    |    55 +-
 src/backend/parser/parse_relation.c                |    29 +-
 src/backend/parser/parse_utilcmd.c                 |     5 +
 src/backend/postmaster/Makefile                    |     2 +-
 src/backend/postmaster/identity.c                  |    40 +-
 .../postmaster/pg_stat_activity_history_process.c  |   945 +
 src/backend/postmaster/postmaster.c                |    81 +
 src/backend/postmaster/service.c                   |    11 +
 src/backend/postmaster/syslogger.c                 |     4 +-
 src/backend/resourcemanager/requesthandler.c       |    45 +-
 src/backend/resourcemanager/requesthandler_RMSEG.c |     8 +-
 src/backend/resourcemanager/resqueuemanager.c      |     8 +-
 src/backend/rewrite/rewriteDefine.c                |     4 +-
 src/backend/storage/buffer/bufmgr.c                |     8 +-
 src/backend/storage/file/fd.c                      |    55 +-
 src/backend/storage/ipc/ipci.c                     |     5 +
 src/backend/storage/lmgr/lock.c                    |     2 +-
 src/backend/storage/lmgr/proc.c                    |     7 +-
 src/backend/storage/lmgr/spin.c                    |     3 +-
 src/backend/storage/page/itemptr.c                 |    29 +
 src/backend/tcop/dest.c                            |    13 +-
 src/backend/tcop/postgres.c                        |   450 +-
 src/backend/tcop/pquery.c                          |    16 +
 src/backend/tcop/utility.c                         |   184 +-
 src/backend/utils/.gitignore                       |     1 +
 src/backend/utils/Gen_hawq_funcoid_mapping.sh      |   731 +
 src/backend/utils/Makefile                         |    10 +-
 src/backend/utils/adt/Makefile                     |     3 +-
 src/backend/utils/adt/array_distance_install.sql   |    15 +
 src/backend/utils/adt/array_distance_uninstall.sql |    15 +
 src/backend/utils/adt/array_userfuncs.c            |   274 +
 src/backend/utils/adt/arrayfuncs.c                 |    42 +
 src/backend/utils/adt/dbsize.c                     |    94 +-
 src/backend/utils/adt/int.c                        |   191 +-
 src/backend/utils/adt/int8.c                       |   148 +-
 src/backend/utils/adt/json.c                       |  2525 ++
 src/backend/utils/adt/jsonb.c                      |  1968 ++
 src/backend/utils/adt/jsonb_gin.c                  |   624 +
 src/backend/utils/adt/jsonb_op.c                   |   292 +
 src/backend/utils/adt/jsonb_util.c                 |  1802 ++
 src/backend/utils/adt/jsonfuncs.c                  |  3958 +++
 src/backend/utils/adt/numeric.c                    |    38 +
 src/backend/utils/adt/pxf_functions.c              |     4 +
 src/backend/utils/adt/regproc.c                    |     6 +-
 src/backend/utils/adt/ruleutils.c                  |    57 +-
 src/backend/utils/adt/selfuncs.c                   |   266 +-
 src/backend/utils/cache/lsyscache.c                |    38 +
 src/backend/utils/cache/relcache.c                 |     3 +-
 src/backend/utils/cache/typcache.c                 |   380 +-
 src/backend/utils/error/elog.c                     |    10 +
 src/backend/utils/fmgr/fmgr.c                      |    62 +-
 src/backend/utils/gp/segadmin.c                    |   126 +
 src/backend/utils/hawq_type_mapping.c              |    16 +-
 src/backend/utils/init/globals.c                   |    14 +
 src/backend/utils/mb/mbutils.c                     |    59 +
 src/backend/utils/misc/atomic.c                    |    12 +-
 src/backend/utils/misc/etc/gpcheck.cnf             |    18 +-
 src/backend/utils/misc/etc/hawq-site.xml           |    17 +
 src/backend/utils/misc/etc/hdfs-client.xml         |     9 -
 src/backend/utils/misc/etc/template-hawq-site.xml  |    11 +
 src/backend/utils/misc/fstream/gfile.c             |    47 +-
 src/backend/utils/misc/guc.c                       |   555 +-
 src/backend/utils/misc/uriparser.c                 |    92 +-
 src/backend/utils/mmgr/mcxt.c                      |     2 +
 src/backend/utils/mmgr/memprot.c                   |     2 +-
 src/backend/utils/mmgr/portalmem.c                 |    23 +-
 src/bin/Makefile                                   |     2 +-
 src/bin/gpcheckhdfs/Makefile                       |     5 +-
 src/bin/gpcheckhdfs/gpcheckhdfs.c                  |    12 +-
 src/bin/gpfdist/Makefile                           |    18 +-
 src/bin/gpfdist/src/gpfdist/glob.c                 |    55 +-
 src/bin/gpfdist/src/gpfdist/gpfdist.c              |  6549 ++--
 src/bin/gpfdist/src/gpfdist/gpfdist_helper.c       |    30 +
 src/bin/gpfdist/src/gpfdist/gpfdist_helper.h       |     7 +
 src/bin/gpfdist/src/gpfdist/gpfxdist.h             |    19 -
 src/bin/gpfdist/src/gpfdist/include/glob.h         |     8 +-
 src/bin/gpfdist/src/gpfdist/transform.c            |   172 +-
 src/bin/gpfdist/src/gpfdist/transform.h            |   247 +
 src/bin/gpfilesystem/hdfs/Makefile                 |     2 +-
 src/bin/gpfusion/gpbridgeapi.c                     |     5 +-
 src/bin/pg_ctl/pg_ctl.c                            |     6 +-
 src/bin/pg_dump/dumputils.c                        |    44 +
 src/bin/pg_dump/pg_backup_archiver.c               |     3 -
 src/bin/pg_dump/pg_dump.c                          |   352 +-
 src/bin/pg_dump/pg_dumpall.c                       |     6 +-
 src/bin/psql/describe.c                            |   218 +-
 src/bin/psql/tab-complete.c                        |   339 +-
 src/include/Makefile                               |     4 +-
 src/include/access/appendonlywriter.h              |     3 -
 src/include/access/extprotocol.h                   |    60 +-
 src/include/access/fileam.h                        |    39 +-
 src/include/access/filesplit.h                     |    10 +
 src/include/access/formatter.h                     |    11 +
 src/include/access/gin.h                           |    23 +
 src/include/access/heapam.h                        |     2 +-
 src/include/access/orcam.h                         |   108 +
 src/include/access/orcsegfiles.h                   |    62 +
 src/include/access/persistentfilesysobjname.h      |     1 +
 src/include/access/plugstorage.h                   |   130 +-
 src/include/access/plugstorage_utils.h             |    50 +-
 src/include/access/pxffilters.h                    |     2 +-
 src/include/access/read_cache.h                    |    29 +
 src/include/access/relscan.h                       |    18 +-
 src/include/access/tupdesc.h                       |     2 +
 src/include/access/url.h                           |    19 +-
 src/include/access/xact.h                          |    53 +
 src/include/catalog/calico.pl                      |     3 +-
 src/include/catalog/caqltrack.pl                   |     2 +-
 src/include/catalog/caqluniqdef.pl                 |     2 +-
 src/include/catalog/catullus.pl                    |     2 +-
 src/include/catalog/heap.h                         |    10 +-
 src/include/catalog/index.h                        |    15 +
 src/include/catalog/namespace.h                    |     3 +-
 src/include/catalog/pablopcatso.pl                 |     2 +-
 src/include/catalog/pg_aggregate.h                 |     6 +
 src/include/catalog/pg_amop.h                      |    27 +
 src/include/catalog/pg_amproc.h                    |    19 +-
 src/include/catalog/pg_authid.h                    |    20 +-
 src/include/catalog/pg_cast.h                      |     3 +
 src/include/catalog/pg_class.h                     |     9 +-
 src/include/catalog/pg_constraint.h                |     4 +
 src/include/catalog/pg_database.h                  |     2 -
 src/include/catalog/pg_exttable.h                  |    51 +-
 src/include/catalog/pg_namespace.h                 |    46 +-
 src/include/catalog/pg_opclass.h                   |     5 +-
 src/include/catalog/pg_operator.h                  |    57 +-
 src/include/catalog/pg_proc.h                      |   219 +-
 src/include/catalog/pg_proc.sql                    |    10 +
 src/include/catalog/pg_type.h                      |    11 +-
 src/include/catalog/sleazy.pl                      |     2 +-
 src/include/catalog/tidycat.pl                     |     2 +-
 src/include/cdb/cdbconn.h                          |     9 +
 src/include/cdb/cdbcopy.h                          |     3 +-
 src/include/cdb/cdbdatabaseinfo.h                  |    33 +
 src/include/cdb/cdbdatalocality.h                  |    68 +
 src/include/cdb/cdbdirectopen.h                    |     4 +
 src/include/cdb/cdbdisp.h                          |     4 +
 src/include/cdb/cdbdispatchresult.h                |    16 +-
 src/include/cdb/cdbexplain.h                       |     8 +
 src/include/cdb/cdbfilesystemcredential.h          |    17 +
 src/include/cdb/cdbgang.h                          |     1 +
 src/include/cdb/cdbhash.h                          |    10 +
 src/include/cdb/cdbmirroredfilesysobj.h            |     3 +
 src/include/cdb/cdbmotion.h                        |     1 -
 src/include/cdb/cdbparquetstoragewrite.h           |    40 +-
 src/include/cdb/cdbpartition.h                     |     2 +-
 src/include/cdb/cdbquerycontextdispatching.h       |    67 +-
 src/include/cdb/cdbutil.h                          |     1 +
 src/include/cdb/cdbvars.h                          |     9 +-
 src/include/cdb/dispatcher.h                       |    10 +-
 src/include/cdb/dispatcher_mgr.h                   |    32 +
 src/include/cdb/dispatcher_new.h                   |    74 +
 src/include/cdb/executormgr.h                      |    14 +-
 src/include/cdb/executormgr_new.h                  |    94 +
 src/include/cdb/ml_ipc.h                           |     5 +
 src/include/cdb/poolmgr.h                          |    36 +-
 src/include/cdb/scheduler.h                        |    96 +
 src/include/cdb/workermgr.h                        |     2 +
 src/include/commands/copy.h                        |     4 +-
 src/include/commands/defrem.h                      |     4 +
 src/include/commands/tablecmds.h                   |    51 +-
 src/include/commands/vacuum.h                      |     1 +
 src/include/cwrapper/cached-result.h               |    43 +
 src/include/cwrapper/executor-c.h                  |    53 +
 src/include/cwrapper/func-kind.cg.h                |   941 +
 src/include/cwrapper/hdfs-file-system-c.h          |   117 +
 src/include/cwrapper/hive-file-system-c.h          |    28 +
 src/include/cwrapper/instrument.h                  |    54 +
 src/include/cwrapper/magma-client-c.h              |   141 +
 src/include/cwrapper/magma-format-c.h              |   121 +
 src/include/cwrapper/orc-format-c.h                |   125 +
 src/include/cwrapper/scheduler-c.h                 |    42 +
 src/include/cwrapper/text-format-c.h               |    66 +
 src/include/cwrapper/type-kind.h                   |   108 +
 src/include/cwrapper/univplan-c.h                  |   397 +
 src/include/executor/execDML.h                     |    51 +
 src/include/executor/execHHashagg.h                |    27 +-
 src/include/executor/execdesc.h                    |    14 +
 src/include/executor/executor.h                    |    50 +-
 src/include/executor/nodeAgg.h                     |     9 +-
 src/include/executor/nodeMotion.h                  |     2 -
 src/include/executor/tuptable.h                    |     4 +-
 src/include/fmgr.h                                 |     5 +
 src/include/fstream/gfile.h                        |     4 +-
 src/include/funcapi.h                              |     8 +
 src/include/mb/pg_wchar.h                          |     1 +
 src/include/miscadmin.h                            |    13 +
 src/include/nodes/execnodes.h                      |    34 +-
 src/include/nodes/memnodes.h                       |     2 +
 src/include/nodes/nodes.h                          |    38 +-
 src/include/nodes/parsenodes.h                     |  3050 +-
 src/include/nodes/plannerconfig.h                  |     4 +
 src/include/nodes/plannodes.h                      |    50 +-
 src/include/nodes/relation.h                       |    25 +-
 src/include/optimizer/clauses.h                    |     1 +
 src/include/optimizer/cost.h                       |    13 +-
 src/include/optimizer/newPlanner.h                 |    92 +
 src/include/optimizer/paths.h                      |     4 +
 src/include/optimizer/planmain.h                   |     2 +
 src/include/optimizer/planshare.h                  |    10 +-
 src/include/optimizer/var.h                        |     2 +-
 src/include/parser/analyze.h                       |    52 +-
 src/include/parser/kwlist.h                        |     2 +-
 src/include/parser/parse_func.h                    |     6 +-
 src/include/pg_config.h.in                         |    23 +-
 src/include/pg_stat_activity_history_process.h     |    46 +
 src/include/postmaster/identity.h                  |     8 +-
 src/include/storage/fd.h                           |     4 +
 src/include/storage/itemptr.h                      |     4 +
 src/include/storage/lwlock.h                       |     2 +-
 src/include/storage/s_lock.h                       |     4 +
 src/include/tcop/dest.h                            |     5 +
 src/include/utils/.gitignore                       |     1 +
 src/include/utils/acl.h                            |     1 +
 src/include/utils/array.h                          |     6 +
 src/include/utils/builtins.h                       |     5 +
 src/include/utils/cloudrest.h                      |    25 +-
 src/include/utils/guc.h                            |    44 +-
 src/include/utils/guc_tables.h                     |     2 +
 src/include/utils/hawq_type_mapping.h              |    95 +-
 src/include/utils/json.h                           |    86 +
 src/include/utils/jsonapi.h                        |   134 +
 src/include/utils/jsonb.h                          |   438 +
 src/include/utils/lsyscache.h                      |     5 +
 src/include/utils/memaccounting.h                  |     3 +
 src/include/utils/memutils.h                       |     3 +-
 src/include/utils/numeric.h                        |     1 +
 src/include/utils/rel.h                            |     8 +-
 src/include/utils/typcache.h                       |     2 +
 src/include/utils/uri.h                            |     9 +-
 src/pl/Makefile                                    |    21 +-
 src/pl/pljava/src/C/pljava/Makefile                |     3 +-
 src/pl/plperl/GNUmakefile                          |     5 +
 src/pl/plpgsql/src/pl_comp.c                       |     3 +-
 src/pl/plpgsql/src/pl_exec.c                       |    63 +-
 src/pl/plpython/plpython.c                         |     6 +
 src/pl/plr.spec                                    |     4 +-
 src/pl/vplr.spec                                   |     3 +-
 src/test/regress/atmsort.pl                        |     4 +-
 src/test/regress/checkinc.py                       |     8 +-
 src/test/regress/data/tenk.data                    |     2 +-
 .../regress/data/upgrade41/catalog40/toolkit.sql   |     4 +-
 src/test/regress/dld.pl                            |     2 +-
 src/test/regress/explain.pl                        |     8 +-
 src/test/regress/get_ereport.pl                    |     2 +-
 src/test/regress/gpdiff.pl                         |     2 +-
 src/test/regress/gpexclude.pl                      |     2 +-
 src/test/regress/gpsourcify.pl                     |     2 +-
 src/test/regress/gpstringsubs.pl                   |     2 +-
 src/test/regress/gptorment.pl                      |     2 +-
 src/test/regress/output/hcatalog_lookup.source     |    14 +-
 src/test/regress/upg2_wizard.pl                    |     2 +-
 src/timezone/Makefile                              |     2 +-
 src/timezone/strftime.c                            |    40 +-
 tools/Makefile                                     |     6 +
 tools/bin/Makefile                                 |     6 +-
 tools/bin/autoswitch.sh                            |    73 +
 tools/bin/generate-greenplum-path.sh               |    23 +-
 tools/bin/generate_load_tpch.pl                    |    34 +-
 tools/bin/gpcheck                                  |   828 +-
 tools/bin/gpload.py                                |     4 +-
 tools/bin/gppylib/commands/base.py                 |     9 +-
 tools/bin/gppylib/data/3.0.json                    | 10458 +++++++
 tools/bin/gppylib/data/3.1.json                    | 10458 +++++++
 tools/bin/gppylib/data/3.2.json                    | 10458 +++++++
 tools/bin/gppylib/data/4.0.json                    | 10458 +++++++
 tools/bin/gppylib/gpsqlUtil.py                     |    61 +
 tools/bin/gpscp                                    |     4 +-
 tools/bin/gpsd                                     |     2 +-
 tools/bin/hawq                                     |    28 +-
 tools/bin/hawq_ctl                                 |   236 +-
 tools/bin/hawqconfig                               |    21 +-
 tools/bin/hawqpylib/HAWQ_HELP.py                   |    89 +-
 tools/bin/hawqpylib/hawqlib.py                     |   184 +-
 tools/bin/hawqregister                             |    11 +-
 tools/bin/lib/hawqinit.sh                          |    40 +-
 tools/bin/magma                                    |   415 +
 tools/bin/pre_setup.sh                             |    52 +
 tools/bin/upgrade.sh                               |   213 +
 tools/tpcds/.gitignore                             |    16 +
 tools/tpcds/makefile                               |    39 +
 tools/tpcds/parallel_dsdgen.cpp                    |   193 +
 tools/tpcds/tools/Cygwin Tools.rules               |    30 +
 tools/tpcds/tools/HISTORY                          |    36 +
 tools/tpcds/tools/How_To_Guide-DS-V2.0.0.docx      |   Bin 0 -> 29054 bytes
 tools/tpcds/tools/How_To_Guide.doc                 |   Bin 0 -> 68608 bytes
 tools/tpcds/tools/Makefile.suite                   |   692 +
 tools/tpcds/tools/PORTING.NOTES                    |   201 +
 tools/tpcds/tools/QGEN.doc                         |   Bin 0 -> 151552 bytes
 tools/tpcds/tools/QgenMain.c                       |   375 +
 tools/tpcds/tools/README                           |    73 +
 tools/tpcds/tools/README_grammar.txt               |    63 +
 tools/tpcds/tools/ReleaseNotes.txt                 |    49 +
 tools/tpcds/tools/StringBuffer.c                   |   189 +
 tools/tpcds/tools/StringBuffer.h                   |    58 +
 tools/tpcds/tools/address.c                        |   350 +
 tools/tpcds/tools/address.h                        |    79 +
 tools/tpcds/tools/build_support.c                  |   448 +
 tools/tpcds/tools/build_support.h                  |    59 +
 tools/tpcds/tools/calendar.dst                     |   441 +
 tools/tpcds/tools/checksum.c                       |    98 +
 tools/tpcds/tools/checksum.vcproj                  |   170 +
 tools/tpcds/tools/cities.dst                       |  1057 +
 tools/tpcds/tools/column_list.txt                  |  1024 +
 tools/tpcds/tools/config.h                         |   177 +
 tools/tpcds/tools/constants.h                      |   325 +
 tools/tpcds/tools/date.c                           |   647 +
 tools/tpcds/tools/date.h                           |    76 +
 tools/tpcds/tools/dbgen2.sln                       |    70 +
 tools/tpcds/tools/dbgen2.vcproj                    |  3358 ++
 tools/tpcds/tools/dbgen_version.c                  |   156 +
 tools/tpcds/tools/dbgen_version.h                  |    52 +
 tools/tpcds/tools/dcgram.c                         |   657 +
 tools/tpcds/tools/dcgram.h                         |    42 +
 tools/tpcds/tools/dcomp.c                          |   327 +
 tools/tpcds/tools/dcomp.h                          |    87 +
 tools/tpcds/tools/dcomp_params.h                   |    61 +
 tools/tpcds/tools/decimal.c                        |   398 +
 tools/tpcds/tools/decimal.h                        |    70 +
 tools/tpcds/tools/dist.c                           |   973 +
 tools/tpcds/tools/dist.h                           |   105 +
 tools/tpcds/tools/distcomp.vcproj                  |   573 +
 tools/tpcds/tools/driver.c                         |   574 +
 tools/tpcds/tools/driver.h                         |    70 +
 tools/tpcds/tools/english.dst                      |  4790 +++
 tools/tpcds/tools/error_msg.c                      |   241 +
 tools/tpcds/tools/error_msg.h                      |   102 +
 tools/tpcds/tools/eval.c                           |   886 +
 tools/tpcds/tools/eval.h                           |    38 +
 tools/tpcds/tools/expr.c                           |   542 +
 tools/tpcds/tools/expr.h                           |   131 +
 tools/tpcds/tools/fips.dst                         |  3187 ++
 tools/tpcds/tools/genrand.c                        |   728 +
 tools/tpcds/tools/genrand.h                        |    82 +
 tools/tpcds/tools/grammar.c                        |   383 +
 tools/tpcds/tools/grammar.h                        |    54 +
 tools/tpcds/tools/grammar.vcproj                   |   321 +
 tools/tpcds/tools/grammar_support.c                |   217 +
 tools/tpcds/tools/grammar_support.h                |    63 +
 tools/tpcds/tools/items.dst                        |   516 +
 tools/tpcds/tools/join.c                           |   462 +
 tools/tpcds/tools/keywords.c                       |   217 +
 tools/tpcds/tools/keywords.h                       |    52 +
 tools/tpcds/tools/list.c                           |   329 +
 tools/tpcds/tools/list.h                           |    74 +
 tools/tpcds/tools/load.c                           |   116 +
 tools/tpcds/tools/load.h                           |    42 +
 tools/tpcds/tools/makefile                         |   700 +
 tools/tpcds/tools/mathops.h                        |    54 +
 tools/tpcds/tools/misc.c                           |   124 +
 tools/tpcds/tools/misc.h                           |    48 +
 tools/tpcds/tools/mkheader.c                       |   177 +
 tools/tpcds/tools/mkheader.vcproj                  |   243 +
 tools/tpcds/tools/names.dst                        | 10220 ++++++
 tools/tpcds/tools/nulls.c                          |   112 +
 tools/tpcds/tools/nulls.h                          |    38 +
 tools/tpcds/tools/parallel.c                       |   229 +
 tools/tpcds/tools/parallel.h                       |    41 +
 tools/tpcds/tools/parallel.sh                      |   101 +
 tools/tpcds/tools/params.h                         |    78 +
 tools/tpcds/tools/permute.c                        |   132 +
 tools/tpcds/tools/permute.h                        |    39 +
 tools/tpcds/tools/porting.c                        |    57 +
 tools/tpcds/tools/porting.h                        |   153 +
 tools/tpcds/tools/pricing.c                        |   278 +
 tools/tpcds/tools/pricing.h                        |    77 +
 tools/tpcds/tools/print.c                          |   690 +
 tools/tpcds/tools/print.h                          |    58 +
 tools/tpcds/tools/qgen.y                           |   572 +
 tools/tpcds/tools/qgen2.vcproj                     |  1092 +
 tools/tpcds/tools/qgen_params.h                    |    83 +
 tools/tpcds/tools/query_handler.c                  |   315 +
 tools/tpcds/tools/query_handler.h                  |    39 +
 tools/tpcds/tools/r_params.c                       |   953 +
 tools/tpcds/tools/r_params.h                       |    83 +
 tools/tpcds/tools/release.c                        |    68 +
 tools/tpcds/tools/release.h                        |    51 +
 tools/tpcds/tools/s_brand.c                        |   151 +
 tools/tpcds/tools/s_brand.h                        |    53 +
 tools/tpcds/tools/s_call_center.c                  |   151 +
 tools/tpcds/tools/s_call_center.h                  |    42 +
 tools/tpcds/tools/s_catalog.c                      |   156 +
 tools/tpcds/tools/s_catalog.h                      |    61 +
 tools/tpcds/tools/s_catalog_order.c                |   231 +
 tools/tpcds/tools/s_catalog_order.h                |    57 +
 tools/tpcds/tools/s_catalog_order_lineitem.c       |   197 +
 tools/tpcds/tools/s_catalog_order_lineitem.h       |    60 +
 tools/tpcds/tools/s_catalog_page.c                 |   131 +
 tools/tpcds/tools/s_catalog_page.h                 |    42 +
 tools/tpcds/tools/s_catalog_promotional_item.c     |   146 +
 tools/tpcds/tools/s_catalog_promotional_item.h     |    55 +
 tools/tpcds/tools/s_catalog_returns.c              |   183 +
 tools/tpcds/tools/s_catalog_returns.h              |    61 +
 tools/tpcds/tools/s_category.c                     |   146 +
 tools/tpcds/tools/s_category.h                     |    55 +
 tools/tpcds/tools/s_class.c                        |   149 +
 tools/tpcds/tools/s_class.h                        |    54 +
 tools/tpcds/tools/s_company.c                      |   145 +
 tools/tpcds/tools/s_company.h                      |    52 +
 tools/tpcds/tools/s_customer.c                     |   280 +
 tools/tpcds/tools/s_customer.h                     |    77 +
 tools/tpcds/tools/s_customer_address.c             |   129 +
 tools/tpcds/tools/s_customer_address.h             |    43 +
 tools/tpcds/tools/s_division.c                     |   147 +
 tools/tpcds/tools/s_division.h                     |    53 +
 tools/tpcds/tools/s_inventory.c                    |   167 +
 tools/tpcds/tools/s_inventory.h                    |    56 +
 tools/tpcds/tools/s_item.c                         |   178 +
 tools/tpcds/tools/s_item.h                         |    43 +
 tools/tpcds/tools/s_manager.c                      |   149 +
 tools/tpcds/tools/s_manager.h                      |    49 +
 tools/tpcds/tools/s_manufacturer.c                 |   145 +
 tools/tpcds/tools/s_manufacturer.h                 |    49 +
 tools/tpcds/tools/s_market.c                       |   150 +
 tools/tpcds/tools/s_market.h                       |    52 +
 tools/tpcds/tools/s_pline.c                        |   186 +
 tools/tpcds/tools/s_pline.h                        |    59 +
 tools/tpcds/tools/s_product.c                      |   150 +
 tools/tpcds/tools/s_product.h                      |    51 +
 tools/tpcds/tools/s_promotion.c                    |   191 +
 tools/tpcds/tools/s_promotion.h                    |    43 +
 tools/tpcds/tools/s_purchase.c                     |   225 +
 tools/tpcds/tools/s_purchase.h                     |    56 +
 tools/tpcds/tools/s_reason.c                       |   146 +
 tools/tpcds/tools/s_reason.h                       |    49 +
 tools/tpcds/tools/s_store.c                        |   155 +
 tools/tpcds/tools/s_store.h                        |    41 +
 tools/tpcds/tools/s_store_promotional_item.c       |   147 +
 tools/tpcds/tools/s_store_promotional_item.h       |    48 +
 tools/tpcds/tools/s_store_returns.c                |   176 +
 tools/tpcds/tools/s_store_returns.h                |    51 +
 tools/tpcds/tools/s_subcategory.c                  |   150 +
 tools/tpcds/tools/s_subcategory.h                  |    52 +
 tools/tpcds/tools/s_subclass.c                     |   150 +
 tools/tpcds/tools/s_subclass.h                     |    52 +
 tools/tpcds/tools/s_tdefs.h                        |    75 +
 tools/tpcds/tools/s_warehouse.c                    |   145 +
 tools/tpcds/tools/s_warehouse.h                    |    42 +
 tools/tpcds/tools/s_web_order.c                    |   237 +
 tools/tpcds/tools/s_web_order.h                    |    56 +
 tools/tpcds/tools/s_web_order_lineitem.c           |   228 +
 tools/tpcds/tools/s_web_order_lineitem.h           |    59 +
 tools/tpcds/tools/s_web_page.c                     |   174 +
 tools/tpcds/tools/s_web_page.h                     |    42 +
 tools/tpcds/tools/s_web_promotinal_item.c          |   149 +
 tools/tpcds/tools/s_web_promotional_item.h         |    49 +
 tools/tpcds/tools/s_web_returns.c                  |   188 +
 tools/tpcds/tools/s_web_returns.h                  |    57 +
 tools/tpcds/tools/s_web_site.c                     |   151 +
 tools/tpcds/tools/s_web_site.h                     |    41 +
 tools/tpcds/tools/s_zip_to_gmt.c                   |   256 +
 tools/tpcds/tools/s_zip_to_gmt.h                   |    48 +
 tools/tpcds/tools/scaling.c                        |   797 +
 tools/tpcds/tools/scaling.dst                      |   138 +
 tools/tpcds/tools/scaling.h                        |    49 +
 tools/tpcds/tools/scd.c                            |   422 +
 tools/tpcds/tools/scd.h                            |    58 +
 tools/tpcds/tools/source_schema.wam                | 24537 +++++++++++++++
 tools/tpcds/tools/sparse.c                         |   112 +
 tools/tpcds/tools/sparse.h                         |    37 +
 tools/tpcds/tools/streets.dst                      |   165 +
 tools/tpcds/tools/substitution.c                   |   188 +
 tools/tpcds/tools/substitution.h                   |    96 +
 tools/tpcds/tools/tdef_functions.c                 |   180 +
 tools/tpcds/tools/tdef_functions.h                 |    66 +
 tools/tpcds/tools/tdefs.c                          |   230 +
 tools/tpcds/tools/tdefs.h                          |   103 +
 tools/tpcds/tools/template.h                       |    72 +
 tools/tpcds/tools/text.c                           |   228 +
 tools/tpcds/tools/tokenizer.l                      |   278 +
 tools/tpcds/tools/tpcds.dst                        |   838 +
 tools/tpcds/tools/tpcds.sql                        |   588 +
 tools/tpcds/tools/tpcds.wam                        | 31365 +++++++++++++++++++
 tools/tpcds/tools/tpcds_20080910.sum               |    48 +
 tools/tpcds/tools/tpcds_ri.sql                     |   139 +
 tools/tpcds/tools/tpcds_source.sql                 |   429 +
 tools/tpcds/tools/validate.c                       |   207 +
 tools/tpcds/tools/validate.h                       |    45 +
 tools/tpcds/tools/w_call_center.c                  |   307 +
 tools/tpcds/tools/w_call_center.h                  |    80 +
 tools/tpcds/tools/w_catalog_page.c                 |   195 +
 tools/tpcds/tools/w_catalog_page.h                 |    57 +
 tools/tpcds/tools/w_catalog_returns.c              |   252 +
 tools/tpcds/tools/w_catalog_returns.h              |    74 +
 tools/tpcds/tools/w_catalog_sales.c                |   403 +
 tools/tpcds/tools/w_catalog_sales.h                |    71 +
 tools/tpcds/tools/w_customer.c                     |   217 +
 tools/tpcds/tools/w_customer.h                     |    68 +
 tools/tpcds/tools/w_customer_address.c             |   157 +
 tools/tpcds/tools/w_customer_address.h             |    55 +
 tools/tpcds/tools/w_customer_demographics.c        |   156 +
 tools/tpcds/tools/w_customer_demographics.h        |    67 +
 tools/tpcds/tools/w_datetbl.c                      |   323 +
 tools/tpcds/tools/w_datetbl.h                      |    80 +
 tools/tpcds/tools/w_household_demographics.c       |   153 +
 tools/tpcds/tools/w_household_demographics.h       |    53 +
 tools/tpcds/tools/w_income_band.c                  |   139 +
 tools/tpcds/tools/w_income_band.h                  |    48 +
 tools/tpcds/tools/w_inventory.c                    |   213 +
 tools/tpcds/tools/w_inventory.h                    |    51 +
 tools/tpcds/tools/w_item.c                         |   303 +
 tools/tpcds/tools/w_item.h                         |    79 +
 tools/tpcds/tools/w_promotion.c                    |   215 +
 tools/tpcds/tools/w_promotion.h                    |    68 +
 tools/tpcds/tools/w_reason.c                       |   141 +
 tools/tpcds/tools/w_reason.h                       |    52 +
 tools/tpcds/tools/w_ship_mode.c                    |   159 +
 tools/tpcds/tools/w_ship_mode.h                    |    56 +
 tools/tpcds/tools/w_store.c                        |   310 +
 tools/tpcds/tools/w_store.h                        |    92 +
 tools/tpcds/tools/w_store_returns.c                |   205 +
 tools/tpcds/tools/w_store_returns.h                |    64 +
 tools/tpcds/tools/w_store_sales.c                  |   297 +
 tools/tpcds/tools/w_store_sales.h                  |    65 +
 tools/tpcds/tools/w_tdefs.h                        |    66 +
 tools/tpcds/tools/w_timetbl.c                      |   156 +
 tools/tpcds/tools/w_timetbl.h                      |    58 +
 tools/tpcds/tools/w_warehouse.c                    |   166 +
 tools/tpcds/tools/w_warehouse.h                    |    57 +
 tools/tpcds/tools/w_web_page.c                     |   250 +
 tools/tpcds/tools/w_web_page.h                     |    60 +
 tools/tpcds/tools/w_web_returns.c                  |   226 +
 tools/tpcds/tools/w_web_returns.h                  |    63 +
 tools/tpcds/tools/w_web_sales.c                    |   360 +
 tools/tpcds/tools/w_web_sales.h                    |    79 +
 tools/tpcds/tools/w_web_site.c                     |   275 +
 tools/tpcds/tools/w_web_site.h                     |    72 +
 tools/tpch/.gitignore                              |    10 +
 tools/tpch/BUGS                                    |   993 +
 tools/tpch/HISTORY                                 |   535 +
 tools/tpch/Makefile                                |   183 +
 tools/tpch/PORTING.NOTES                           |   220 +
 tools/tpch/README                                  |   436 +
 tools/tpch/bcd2.c                                  |   264 +
 tools/tpch/bcd2.h                                  |    31 +
 tools/tpch/bm_utils.c                              |   558 +
 tools/tpch/build.c                                 |   466 +
 tools/tpch/cdbhash.c                               |    13 +
 tools/tpch/cdbhash.h                               |    16 +
 tools/tpch/config.h                                |   222 +
 tools/tpch/dbgen.vcproj                            |   469 +
 tools/tpch/dists.dss                               |   839 +
 tools/tpch/driver.c                                |   846 +
 tools/tpch/dss.ddl                                 |    70 +
 tools/tpch/dss.h                                   |   585 +
 tools/tpch/dss.ri                                  |   100 +
 tools/tpch/dsstypes.h                              |   186 +
 tools/tpch/load_stub.c                             |   221 +
 tools/tpch/makefile.suite                          |   182 +
 tools/tpch/permute.c                               |   205 +
 tools/tpch/permute.h                               |    67 +
 tools/tpch/print.c                                 |   730 +
 tools/tpch/qgen.c                                  |   494 +
 tools/tpch/qgen.vcproj                             |   269 +
 tools/tpch/release.h                               |     7 +
 tools/tpch/rnd.c                                   |   241 +
 tools/tpch/rnd.h                                   |   111 +
 tools/tpch/rng64.c                                 |   137 +
 tools/tpch/rng64.h                                 |    26 +
 tools/tpch/shared.h                                |    72 +
 tools/tpch/speed_seed.c                            |   260 +
 tools/tpch/text.c                                  |   388 +
 tools/tpch/tpcd.h                                  |   141 +
 tools/tpch/tpch.sln                                |    54 +
 tools/tpch/tpchdriver.c                            |  2609 ++
 tools/tpch/tpchdriver.vcproj                       |   413 +
 tools/tpch/update_release.sh                       |    23 +
 tools/tpch/varsub.c                                |   370 +
 tools/tpch/vsub.c                                  |   347 +
 944 files changed, 251088 insertions(+), 28687 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..d137340
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,164 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 3.12)
+PROJECT(hawq)
+
+SET(generate_source
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/parser/gram.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/catalog/caql/gram.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/catalog/caql/catquery.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/catalog/core/catcoretable.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/bootstrap/bootparse.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/port/dynloader.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/port/pg_sema.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/port/pg_shmem.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/utils/fmgrtab.c
+        )
+set_source_files_properties(
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/parser/gram.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/catalog/caql/gram.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/catalog/caql/catquery.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/catalog/core/catcoretable.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/bootstrap/bootparse.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/port/dynloader.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/port/pg_sema.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/port/pg_shmem.c
+        ${CMAKE_CURRENT_SOURCE_DIR}/src/backend/utils/fmgrtab.c
+        PROPERTIES GENERATED TRUE
+)
+add_custom_command(
+        OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/config.log
+        COMMAND ./configure --enable-orca --with-python
+        COMMAND make skip-orca-build
+        COMMAND make -C src/port/ pg_config_paths.h
+        COMMAND make -C src/backend/
+        ../../src/include/utils/hawq_funcoid_mapping.h
+        ../../src/include/utils/fmgroids.h
+        ../../src/include/utils/probes.h
+        ../../src/include/parser/gram.h
+        COMMAND make -C src/backend/catalog/caql/ catquery.c gram.c scan.c
+        COMMAND make -C src/backend/catalog/core/ catcoretable.c
+        COMMAND make -C src/backend/bootstrap/ bootparse.c
+        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+)
+add_custom_target(config
+        DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/config.log
+        )
+
+
+FILE(GLOB cdb_parquet_source "${CMAKE_CURRENT_SOURCE_DIR}/src/backend/access/parquet/metadatautil_c++/*.cpp")
+
+FILE(GLOB_RECURSE dxltranslators_source "${CMAKE_CURRENT_SOURCE_DIR}/src/backend/gpopt/*.cpp")
+
+FILE(GLOB_RECURSE pg_timezone_source "${CMAKE_CURRENT_SOURCE_DIR}/src/timezone/*.c")
+LIST(FILTER pg_timezone_source EXCLUDE REGEX ".*/timezone/zic.c")
+
+FILE(GLOB_RECURSE pg_port_source "${CMAKE_CURRENT_SOURCE_DIR}/src/port/*.c")
+LIST(FILTER pg_port_source INCLUDE REGEX "chklocale|copydir|dirmod|exec|noblock|path|pgsleep|pgstrcasecmp|qsort|qsort_arg|sprompt|thread|strlcpy|strlcat|pg_crc32c")
+
+FILE(GLOB_RECURSE pg_regex_source "${CMAKE_CURRENT_SOURCE_DIR}/src/backend/regex/*.c")
+LIST(FILTER pg_regex_source INCLUDE REGEX "regcomp|regerror|regexec|regfree")
+
+
+FILE(GLOB_RECURSE cdb_source "${CMAKE_CURRENT_SOURCE_DIR}/src/backend/*.c")
+
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/test/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/test_discard/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/mb/conversion_procs/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/mb/win1251.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/mb/win866.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/mb/iso.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/index/catquery.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/bootscanner.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/scan.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/guc-file.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/cdbdistributedxidmap.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/nodeParquetScan.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/nodeSeqscan.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/like_match.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/win32.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/regex/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/beos/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/dynloader/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/nextstep/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/qnx4/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/win32/.*")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/ipc_test.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/posix_sema.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/win32_sema.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/win32_shmem.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/sysv_sema.c")
+LIST(FILTER cdb_source EXCLUDE REGEX ".*/port/sysv_shmem.c")
+LIST(APPEND cdb_source ${pg_port_source} ${pg_timezone_source} ${cdb_parquet_source} ${pg_regex_source})
+
+if (CMAKE_SYSTEM_NAME STREQUAL Darwin)
+    ADD_DEFINITIONS(-DGPOS_Darwin)
+endif (CMAKE_SYSTEM_NAME STREQUAL Darwin)
+if (CMAKE_SYSTEM_NAME STREQUAL Linux)
+    ADD_DEFINITIONS(-D_GNU_SOURCE)
+    LIST(FILTER cdb_source EXCLUDE REGEX "src/backend/port/darwin/system.c")
+endif (CMAKE_SYSTEM_NAME STREQUAL Linux)
+ADD_DEFINITIONS(-DDLSUFFIX="so")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
+
+include_directories(src/include)
+include_directories(src/backend/resourcemanager/include)
+include_directories(src/backend/gp_libpq_fe)
+include_directories(src/port)
+
+include_directories(${CMAKE_SOURCE_DIR}/hornet/dbcommon/src)
+include_directories(${CMAKE_SOURCE_DIR}/hornet/univplan/src)
+include_directories(${CMAKE_SOURCE_DIR}/hornet/magma/src)
+include_directories(${CMAKE_SOURCE_DIR}/hornet/storage/src)
+include_directories(${CMAKE_SOURCE_DIR}/hornet/executor/src)
+include_directories(${CMAKE_SOURCE_DIR}/hornet/scheduler/src)
+include_directories(${CMAKE_BINARY_DIR}/hornet/dbcommon/src)
+include_directories(/opt/dependency/package/include)
+
+link_directories(/opt/dependency-Darwin/package/lib)
+link_directories(/opt/dependency/package/lib)
+
+# gporca
+add_library(dxltranslators ${dxltranslators_source})
+add_dependencies(dxltranslators config)
+
+# postgres
+add_executable(postgres ${cdb_source} ${generate_source})
+target_link_libraries(postgres z bz2 lz4 snappy xml2 curl ldap json-c krb5 yarn thrift) # basic
+target_link_libraries(postgres gpos xerces-c naucrates gpdbcost gpopt dxltranslators) # gp-orca
+target_link_libraries(postgres hdfs3 dbcommon-shared univplan-shared storage-shared magma-client-shared executor-shared scheduler-shared) # hornet
+target_link_libraries(postgres dl)
+add_dependencies(postgres config)
+
+# pluggable storage
+add_library(orc.so contrib/orc/orc.c)
+add_library(magma.so contrib/magma/magma.c)
+
+# libpq
+FILE(GLOB_RECURSE libpq_source "${CMAKE_CURRENT_SOURCE_DIR}/src/interfaces/libpq/*.c")
+LIST(FILTER libpq_source EXCLUDE REGEX "win32.c")
+LIST(APPEND libpq_source src/backend/libpq/ip.c)
+LIST(APPEND libpq_source src/backend/libpq/md5.c)
+LIST(APPEND libpq_source src/backend/utils/mb/encnames.c)
+LIST(APPEND libpq_source src/backend/utils/mb/wchar.c)
+add_library(pq ${libpq_source})
+add_library(pgport ${pg_port_source})
+target_link_libraries(pq ldap pgport)
+target_compile_options(pq PRIVATE -DFRONTEND)
+add_dependencies(pq config)
+add_dependencies(pgport config)
+
+# feature-test
+set(CMAKE_CXX_STANDARD 11)
+FILE(GLOB_RECURSE feature_test_source "${CMAKE_CURRENT_SOURCE_DIR}/src/test/feature/*.cpp")
+LIST(FILTER feature_test_source EXCLUDE REGEX "/Ranger/")
+LIST(FILTER feature_test_source EXCLUDE REGEX "/userPoc/")
+LIST(FILTER feature_test_source EXCLUDE REGEX "/sqlreport/")
+add_executable(feature-test ${feature_test_source})
+target_include_directories(feature-test PUBLIC src/test/feature)
+target_include_directories(feature-test PUBLIC src/test/feature/lib)
+target_include_directories(feature-test PUBLIC src/interfaces/libpq)
+target_link_libraries(feature-test xml2 gtest pq)
+target_link_libraries(feature-test pthread)
+set_target_properties(feature-test
+        PROPERTIES
+        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/src/test/feature/"
+        )
diff --git a/GNUmakefile.in b/GNUmakefile.in
index b69df1b..a991c4f 100644
--- a/GNUmakefile.in
+++ b/GNUmakefile.in
@@ -10,18 +10,6 @@ include $(top_builddir)/src/Makefile.global
 
 all:
 #	$(MAKE) -C doc $@
-	$(MAKE) -C depends/thirdparty/googletest $@
-	$(MAKE) -C depends/thirdparty/googletest install
-	$(MAKE) -C depends/libhdfs3 $@
-	$(MAKE) -C depends/libhdfs3 install
-	$(MAKE) -C depends/libyarn $@
-	$(MAKE) -C depends/libyarn install
-	$(MAKE) -C depends/dbcommon $@
-	$(MAKE) -C depends/dbcommon install
-	$(MAKE) -C depends/univplan $@
-	$(MAKE) -C depends/univplan install
-	$(MAKE) -C depends/storage $@
-	$(MAKE) -C depends/storage install
 	$(MAKE) -C src $@
 	$(MAKE) -C config $@
 	$(MAKE) -C contrib $@
@@ -31,12 +19,6 @@ all:
 
 install:
 #	$(MAKE) -C doc $@
-	$(MAKE) -C depends/thirdparty/googletest install
-	$(MAKE) -C depends/libhdfs3 install
-	$(MAKE) -C depends/libyarn install
-	$(MAKE) -C depends/dbcommon install
-	$(MAKE) -C depends/univplan install
-	$(MAKE) -C depends/storage install
 	$(MAKE) -C src $@
 	$(MAKE) -C config $@
 	$(MAKE) -C contrib $@
@@ -60,15 +42,10 @@ distprep:
 
 rpm:
 	$(MAKE) -C contrib/hawq-package $@
-	$(MAKE) -C pxf $@
+#	$(MAKE) -C pxf $@
 	$(MAKE) -C ranger-plugin $@
 	$(MAKE) -C contrib/hawq-package rpm-tarball
 
-unittest:
-	$(MAKE) -C depends/dbcommon $@
-	$(MAKE) -C depends/univplan $@
-	$(MAKE) -C depends/storage $@
-
 feature-test:
 	$(MAKE) -C src feature-test
 
@@ -80,18 +57,12 @@ feature-test-clean:
 clean:
 #	$(MAKE) -C doc $@
 	$(MAKE) -C contrib $@
-	-$(MAKE) -C depends/thirdparty/googletest $@ 
-	-$(MAKE) -C depends/libhdfs3 $@
-	-$(MAKE) -C depends/libyarn $@
-	-$(MAKE) -C depends/dbcommon $@
-	-$(MAKE) -C depends/univplan $@
-	-$(MAKE) -C depends/storage $@
 	$(MAKE) -C src $@
 	$(MAKE) -C config $@
 	$(MAKE) -C contrib $@
 	$(MAKE) -C tools $@
 	$(MAKE) -C contrib/hawq-package $@
-	$(MAKE) -C pxf $@
+#	$(MAKE) -C pxf $@
 	$(MAKE) -C ranger-plugin $@
 	-$(MAKE) -C src feature-test-clean
 # Garbage from autoconf:
@@ -102,17 +73,11 @@ clean:
 distclean maintainer-clean:
 #	-$(MAKE) -C doc $@
 	-$(MAKE) -C contrib $@
-	-$(MAKE) -C depends/thirdparty/googletest $@ 
-	-$(MAKE) -C depends/libhdfs3 $@
-	-$(MAKE) -C depends/libyarn $@
-	-$(MAKE) -C depends/dbcommon $@
-	-$(MAKE) -C depends/univplan $@
-	-$(MAKE) -C depends/storage $@
 	-$(MAKE) -C config $@
 	-$(MAKE) -C tools $@
 	-$(MAKE) -C src feature-test-clean
 	-$(MAKE) -C src $@
-	-$(MAKE) -C pxf $@
+#	-$(MAKE) -C pxf $@
 	-$(MAKE) -C ranger-plugin $@
 	-rm -f config.cache config.log config.status GNUmakefile
 # Garbage from autoconf:
@@ -203,14 +168,44 @@ ifeq ($(origin filter), undefined)
 filter = .
 endif
 
-coverage-show:
+coverage:
+	@rm -rf CodeCoverageReport CodeCoverage.info CodeCoverage.info.cleaned
 	lcov --directory $(filter) --capture --output-file CodeCoverage.info
 	lcov --remove CodeCoverage.info 'test/*' 'mock/*' '/usr/*' '/opt/*' '*ext/rhel5_x86_64*' '*ext/osx*' --output-file CodeCoverage.info.cleaned
-	genhtml -o CodeCoverageReport CodeCoverage.info.cleaned
+	genhtml -o CodeCoverageReport CodeCoverage.info.cleaned --prefix `pwd`
 
-coverage-reset:
+resetcoverage:
 	lcov --directory . --zerocounters
 
+sanity:
+	./sanity-test.sh
+
+skip-orca-build:
+	touch ./depends/thirdparty/gpos.commit
+	touch ./depends/thirdparty/gpos_prepare_timestamp
+	touch ./depends/thirdparty/gpos_build_timestamp
+
+	touch ./depends/thirdparty/gp-xerces.commit
+	touch ./depends/thirdparty/gp-xerces_prepare_timestamp
+	touch ./depends/thirdparty/gp-xerces_build_timestamp
+
+	touch ./depends/thirdparty/gporca.commit
+	touch ./depends/thirdparty/gporca_prepare_timestamp
+	touch ./depends/thirdparty/gporca_build_timestamp
+
+	mkdir -p ./depends/thirdparty/gpos/build/
+	mkdir -p ./depends/thirdparty/gp-xerces/build/
+	mkdir -p ./depends/thirdparty/gporca/build/
+
+	echo install: > ./depends/thirdparty/gpos/build/makefile
+	echo install: > ./depends/thirdparty/gp-xerces/build/makefile
+	echo install: > ./depends/thirdparty/gporca/build/makefile
+
+install-orca:
+	$(MAKE) -C src/backend submake-libdxltranslators
+	$(MAKE) -C depends/thirdparty/gpos/build/ install
+	$(MAKE) -C depends/thirdparty/gp-xerces/build/ install
+	$(MAKE) -C depends/thirdparty/gporca/build/ install
 
 .PHONY: dist distdir distcheck
 #unexport split-dist
diff --git a/LICENSE b/LICENSE
index 13edf22..fe6ad1a 100644
--- a/LICENSE
+++ b/LICENSE
@@ -202,9 +202,9 @@
 
 =======================================================================
 
-Apache HAWQ Subcomponents:
+Apache HAWQ (incubating) Subcomponents:
 
-  The Apache HAWQ project contains subcomponents with
+  The Apache HAWQ (incubating) project contains subcomponents with
   separate copyright notices and license terms. Your use of the source
   code for these subcomponents is subject to the terms and conditions
   of the following licenses.
diff --git a/NOTICE b/NOTICE
index 20ee60c..1b1b2e7 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,4 +1,4 @@
-Apache HAWQ
+Apache HAWQ (incubating) 
 Copyright 2017 The Apache Software Foundation.
 
 This product includes software developed at
diff --git a/README.md b/README.md
index 3e39cec..1d8c80b 100644
--- a/README.md
+++ b/README.md
@@ -1,20 +1,20 @@
-![HAWQ](http://hawq.apache.org/images/logo-hawq.png)
+![HAWQ](http://hawq.incubator.apache.org/images/logo-hawq.png)
 
 ---
 
 |CI Process|Status|
 |---|---|
-|Travis CI Build|[![https://travis-ci.org/apache/hawq.svg?branch=master](https://travis-ci.org/apache/hawq.png?branch=master)](https://travis-ci.org/apache/hawq?branch=master)|
+|Travis CI Build|[![https://travis-ci.org/apache/incubator-hawq.svg?branch=master](https://travis-ci.org/apache/incubator-hawq.png?branch=master)](https://travis-ci.org/apache/incubator-hawq?branch=master)|
 |Apache Release Audit Tool ([RAT](https://creadur.apache.org/rat/))|[![Rat Status](https://builds.apache.org/buildStatus/icon?job=HAWQ-rat)](https://builds.apache.org/view/HAWQ/job/HAWQ-rat/)|
 |Coverity Static Analysis   |[![Coverity Scan Build](https://scan.coverity.com/projects/apache-incubator-hawq/badge.svg)](https://scan.coverity.com/projects/apache-incubator-hawq)|
 
 ---
 
-[Website](http://hawq.apache.org/) |
+[Website](http://hawq.incubator.apache.org/) |
 [Wiki](https://cwiki.apache.org/confluence/display/HAWQ) |
-[Documentation](http://hawq.apache.org/docs/userguide/latest/) |
-[Developer Mailing List](mailto:dev@hawq.apache.org) |
-[User Mailing List](mailto:user@hawq.apache.org) |
+[Documentation](http://hawq.incubator.apache.org/docs/userguide/2.2.0.0-incubating/overview/HAWQOverview.html) |
+[Developer Mailing List](mailto:dev@hawq.incubator.apache.org) |
+[User Mailing List](mailto:user@hawq.incubator.apache.org) |
 [Q&A Collections](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65144284) |
 [Open Defect](https://issues.apache.org/jira/browse/HAWQ)
 
@@ -43,11 +43,296 @@ Apache HAWQ is a Hadoop native SQL query engine that combines the key technologi
  - Support most third party tools: Tableau, SAS et al.
  - Standard connectivity: JDBC/ODBC
 
-# Build & Install & Test
+# Build & Setup HAWQ++ on Mac
+
+## Step 1 Setup HDFS
+
+Install HomeBrew referring to [here](https://brew.sh/).
+
+```
+brew install hadoop
+```
+### Step 1.1 Configure HDFS parameters
+
+* `${HADOOP_HOME}/etc/hadoop/slaves`
+	
+	For example, `/usr/local/Cellar/hadoop/2.8.1/libexec/etc/hadoop/slaves`
+
+	```
+	localhost
+	```
+
+* `${HADOOP_HOME}/etc/hadoop/core-site.xml`
+
+	For example, `/usr/local/Cellar/hadoop/2.8.1/libexec/etc/hadoop/core-site.xml`
+
+	```xml
+	<?xml version="1.0" encoding="UTF-8"?>
+	<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+	<configuration>
+	    <property>
+	        <name>fs.defaultFS</name>
+	        <value>hdfs://localhost:8020</value>
+	    </property>
+	</configuration>
+	```
+
+* `${HADOOP_HOME}/etc/hadoop/hdfs-site.xml`
+
+	For example, `/usr/local/Cellar/hadoop/2.8.1/libexec/etc/hadoop/hdfs-site.xml`
+
+	**Attention: Replace `${HADOOP_DATA_DIRECTORY}` and `${USER_NAME}` variables with your own specific values.**
+
+	```xml
+	<?xml version="1.0" encoding="UTF-8"?>
+	<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+	<configuration>
+	    <property>
+	        <name>dfs.namenode.name.dir</name>
+	        <value>file://${HADOOP_DATA_DIRECTORY}/name</value>
+	        <description>Specify your dfs namenode dir path</description>
+	    </property>
+	    <property>
+	        <name>dfs.datanode.data.dir</name>
+	        <value>file://${HADOOP_DATA_DIRECTORY}/data</value>
+	        <description>Specify your dfs datanode dir path</description>
+	    </property>
+	    <property>
+	        <name>dfs.replication</name>
+	        <value>1</value>
+	    </property>
+	</configuration>
+	```
+
+### Step 1.2 Configure HDFS environment
+
+```bash
+touch ~/.bashrc
+touch ~/.bash_profile
+	
+echo "if [ -f ~/.bashrc ]; then
+source ~/.bashrc
+fi" >> ~/.bash_profile
+	
+echo "export HADOOP_HOME=/usr/local/Cellar/hadoop/2.8.1/libexec" >> ~/.bashrc
+echo "export PATH=$PATH:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin" >> ~/.bashrc
+	
+source ~/.bashrc
+```
+
+### Step 1.3 Setup passphraseless ssh	
+```bash
+ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+chmod 0600 ~/.ssh/authorized_keys
+```
+
+Now you can `ssh localhost` without a passphrase. If you hit a "port 22: connection refused" error, turn on `Remote Login` in your Mac's `System Preferences -> Sharing`.
+
+### Step 1.4 Format the HDFS filesystem
+
+```bash
+hdfs namenode -format
+```
+
+### Step 1.5 Start HDFS
+
+```bash
+# start/stop HDFS
+start-dfs.sh/stop-dfs.sh
+ 
+# Do some basic tests to make sure HDFS works
+hdfs dfsadmin -report
+hadoop fs -ls /
+```
+
+**When things go wrong, check the logs and the FAQ in the wiki first.**
+
+## Step 2 Setup hawq++
+
+### Step 2.1 System configuration
+
+#### Step 2.1.1 Turn off Rootless System Integrity Protection
+
+Turn off Rootless System Integrity Protection on macOS versions newer than `OS X El Capitan 10.11` if you encounter tricky LIBRARY_PATH problems, e.g. HAWQ-513, which keep the hawq binary from finding its shared library dependencies. Steps below:
+
+1. Reboot the Mac and hold down the Command + R keys simultaneously after you hear the startup chime; this boots OS X into Recovery Mode
+2. When the “OS X Utilities” screen appears, pull down the ‘Utilities’ menu at the top of the screen and choose “Terminal”
+3. Type the following command into the terminal, then hit return: `csrutil disable; reboot`
+
+#### Step 2.1.2 Configure `sysctl.conf`
+
+For Mac OSX 10.10 / 10.11, add the following content to `/etc/sysctl.conf` and then run `sudo sysctl -p` to activate it.
+
+For Mac OSX 10.12+, add the following content to `/etc/sysctl.conf`, then run `cat /etc/sysctl.conf | xargs sudo sysctl` to verify the settings.
+
+```
+kern.sysv.shmmax=2147483648
+kern.sysv.shmmin=1
+kern.sysv.shmmni=64
+kern.sysv.shmseg=16
+kern.sysv.shmall=524288
+kern.maxfiles=65535
+kern.maxfilesperproc=65536
+kern.corefile=/cores/core.%N.%P
+```
+
+### Step 2.2 Prepare source code and target folder
+
+```bash
+mkdir ~/dev
+git clone git@github.com:oushu-io/hawq ~/dev/hawq
+git clone git@github.com:oushu-io/hornet ~/dev/hornet
+git clone git@github.com:oushu-io/libhdfs3 ~/dev/libhdfs3
+
+sudo mkdir -p /opt
+sudo chmod a+w /opt
+sudo install -o $USER -d /usr/local/hawq
+```
+### Step 2.3 Setup toolchain and thirdparty dependency
+
+1. Setup toolchain and thirdparty dependency referring to [here](https://github.com/oushu-io/hornet/tree/master/thirdparty).
+2. Build hornet referring to [here](https://github.com/oushu-io/hornet#hornet).
+
+### Step 2.4 Build HAWQ++
+
+- 2.4.1 Add hawq environment information to `~/.bashrc`, and **`source ~/.bashrc`** to make it take effect.
+
+  ```bash
+  ulimit -c 10000000000
+  export CC=clang
+  export CXX=clang++
+  export DEPENDENCY_PATH=/opt/dependency/package
+  source /opt/dependency-Darwin/package/env.sh
+  ```
+- 2.4.2 Build HAWQ++
+
+  ```bash
+  cd ~/dev/hawq
+  git checkout oushu-master
+  ln -sf ../../pre-push .git/hooks/pre-push
+  ln -sf ../../commit-msg .git/hooks/commit-msg
+  ./configure
+  make -j8
+  make -j8 install
+  ```
+
+
+### Step 2.5 Configure HAWQ++
+
+```shell
+mkdir /tmp/magma_master
+mkdir /tmp/magma_segment
+```
+
+Feel free to use the default `/usr/local/hawq/etc/hawq-site.xml`. Make sure `hawq_dfs_url` maps to the `fs.defaultFS` setting in `${HADOOP_HOME}/etc/hadoop/core-site.xml`, as sketched below.
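+
+For reference, a minimal sketch of the relevant `hawq-site.xml` property, assuming the `fs.defaultFS` value `hdfs://localhost:8020` configured in Step 1.1 (the `/hawq_default` data path is only an illustrative default; adjust it to your setup):
+
+```xml
+<property>
+    <name>hawq_dfs_url</name>
+    <!-- host:port must match fs.defaultFS in core-site.xml; the trailing
+         directory is where HAWQ keeps its data on HDFS -->
+    <value>localhost:8020/hawq_default</value>
+</property>
+```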
+
+### Step 2.6 Init/Start/Stop HAWQ++
+
+```bash
+# Before initializing HAWQ, you need to install HDFS and make sure it works.
+ 
+source /usr/local/hawq/greenplum_path.sh
+ 
+# Besides, you need to set up password-less ssh on the systems.
+# If you only install hawq for development on localhost, skip this step.
+# Exchange SSH keys between the hosts host1, host2, and host3:
+#hawq ssh-exkeys -h host1 -h host2 -h host3
+
+# Initialize HAWQ cluster and start HAWQ by default
+hawq init cluster -a
+
+# Now you can stop/restart/start the cluster using:  
+hawq stop/restart/start cluster
+# The init command invokes the start command automatically, too.
+ 
+# HAWQ master and segments are completely decoupled.
+# So you can also init, start or stop the master and segments separately.
+# For example, to init: hawq init master, then hawq init segment
+#              to stop: hawq stop master, then hawq stop segment
+#              to start: hawq start master, then hawq start segment
+```
+
+> Every time you init hawq you need to delete some files. The directories of all the files you need to delete are configured in /usr/local/hawq/etc/hawq-site.xml.
+> 
+> - 1) Name: `hawq_dfs_url` Description: URL for accessing HDFS
+> - 2) Name: `hawq_master_directory` Description: The directory of hawq master
+> - 3) Name: `hawq_segment_directory` Description: The directory of hawq segment
+> - 4) Name: `hawq_magma_locations_master` Description: HAWQ magma service locations on master
+> - 5) Name: `hawq_magma_locations_segment` Description: HAWQ magma service locations on segment
+> 
+> For example:
+> 
+> ```bash
+> hdfs dfs -rm -r /hawq*
+> rm -rf /Users/xxx/data/hawq/master/*
+> rm -rf /Users/xxx/data/hawq/segment/*
+> rm -rf /Users/xxx/data/hawq/tmp/magma_master/*
+> rm -rf /Users/xxx/data/hawq/tmp/magma_segment/*
+> ```
+> 
+> Check whether any postgres or magma process is running on your machine. If so, you must kill them before you init hawq. For example,
+> 
+> ```bash
+> ps -ef | grep postgres | grep -v grep | awk '{print $2}'| xargs kill -9
+> ps -ef | grep magma | grep -v grep | awk '{print $2}'| xargs kill -9
+> ```
+
+# Build HAWQ++ on Centos 7
+
+Almost the same as on macOS; feel free to give it a try, referring to [here](https://github.com/oushu-io/knowledgebase/wiki/Build-HAWQ-on-Linux-macOS#build-hawq-on-linuxmacos).
+
+# Build HAWQ++ on Centos 7(6.X) using docker
+
+Almost the same as on macOS; feel free to give it a try, referring to [here](https://github.com/oushu-io/knowledgebase/wiki/Build-HAWQ-in-Docker-Container#build-hawq-in-docker-container).
+
+# Build & Install & Test (Apache HAWQ Version)
+
 ---------------
 Please see HAWQ wiki page:
 https://cwiki.apache.org/confluence/display/HAWQ/Build+and+Install
 
+It is also OK to reuse the environment set up for building OushuDB, which saves time.
+
+```shell
+cd hawq
+make feature-test
+```
+
+To make the output consistent, please create a new database and use a specific locale.
+
+```
+TEST_DB_NAME="hawq_feature_test_db"
+psql -d postgres -c "create database $TEST_DB_NAME;"
+export PGDATABASE=$TEST_DB_NAME
+psql -c "alter database $TEST_DB_NAME set lc_messages to 'C';"
+psql -c "alter database $TEST_DB_NAME set lc_monetary to 'C';"
+psql -c "alter database $TEST_DB_NAME set lc_numeric to 'C';"
+psql -c "alter database $TEST_DB_NAME set lc_time to 'C';"
+psql -c "alter database $TEST_DB_NAME set timezone_abbreviations to 'Default';"
+psql -c "alter database $TEST_DB_NAME set timezone to 'PST8PDT';"
+psql -c "alter database $TEST_DB_NAME set datestyle to 'postgres,MDY';"
+```
+
+To run the normal feature tests, please use the filters below:
+1. The tests below can only run in sequential mode
+```
+hawq/src/test/feature/feature-test --gtest_filter=-TestHawqRegister.*:TestTPCH.TestStress:TestHdfsFault.*:TestZookeeperFault.*:TestHawqFault.*
+```
+2. The tests below can run in parallel
+```
+cd hawq/src/test/feature/
+mkdir -p testresult
+python ./gtest-parallel --workers=4 --output_dir=./testresult --print_test_times  ./feature-test --gtest_filter=-TestHawqRegister.*:TestTPCH.*:TestHdfsFault.*:TestZookeeperFault.*:TestHawqFault.*:TestQuitQuery.*:TestErrorTable.*:TestExternalTableGpfdist.*:TestExternalTableOptionMultibytesDelimiter.TestGpfdist:TETAuth.*
+```
+
+  - TestHawqRegister: not included
+  - TestTPCH.TestStress: TPCH stress test
+  - TestHdfsFault: HDFS fault tests
+  - TestZookeeperFault: Zookeeper fault tests
+  - TestHawqFault: HAWQ fault tolerance tests
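+
+To iterate on a single suite or case, the same `--gtest_filter` option also accepts a positive pattern, for example (using one of the suites named in the filters above):
+
+```
+cd hawq/src/test/feature/
+./feature-test --gtest_filter=TestErrorTable.*
+```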
+
+
 # Export Control
 ----------------
 
@@ -67,3 +352,5 @@ Apache Software Foundation distribution makes it eligible for export under the
 License Exception ENC Technology Software Unrestricted (TSU) exception (see the
 BIS Export Administration Regulations, Section 740.13) for both object code and
 source code.
+
+
diff --git a/commit-msg b/commit-msg
new file mode 100755
index 0000000..7d5783a
--- /dev/null
+++ b/commit-msg
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# An example hook script to check the commit log message.
+# Called by "git commit" with one argument, the name of the file
+# that has the commit message.  The hook should exit with non-zero
+# status after issuing an appropriate message if it wants to stop the
+# commit.  The hook is allowed to edit the commit message file.
+#
+# To enable this hook, rename this file to "commit-msg".
+
+# Uncomment the below to add a Signed-off-by line to the message.
+# Doing this in a hook is a bad idea in general, but the prepare-commit-msg
+# hook is more suited to it.
+#
+# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
+# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"
+
+# This example catches duplicate Signed-off-by lines.
+GITMESS=`awk '{printf("%s",$0)}' $1`
+CHECKHAWQ="^(HAWQ)-[[:digit:]]+\."
+CHECKSUPPORT="^(SUPPORT)-[[:digit:]]+\."
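+# A few hypothetical messages, for illustration only:
+#   "HAWQ-1234. Fix the foo bar"   -> accepted (matches CHECKHAWQ)
+#   "SUPPORT-42. Patch the baz"    -> accepted (matches CHECKSUPPORT)
+#   "Fix the foo bar"              -> rejected (no HAWQ-/SUPPORT- prefix)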
+if ! [[ "$GITMESS" =~ $CHECKHAWQ ]] && ! [[ "$GITMESS" =~ $CHECKSUPPORT ]] ; then
+    echo "Message should be like 'HAWQ-XXXX. Message' or 'SUPPORT-XXXX. Message'"
+    exit 1
+fi
diff --git a/config/programs.m4 b/config/programs.m4
index e1c0584..637f47a 100644
--- a/config/programs.m4
+++ b/config/programs.m4
@@ -4,7 +4,7 @@
 # PGAC_PATH_BISON
 # ---------------
 # Look for Bison, set the output variable BISON to its path if found.
-# Reject versions before 1.875 or after 2.9 (they have bugs or capacity limits).
+# Reject versions before 1.875 (they have bugs or capacity limits).
 # Note we do not accept other implementations of yacc.
 
 AC_DEFUN([PGAC_PATH_BISON],
@@ -16,11 +16,11 @@ fi
 if test "$BISON"; then
   pgac_bison_version=`$BISON --version 2>/dev/null | sed q`
   AC_MSG_NOTICE([using $pgac_bison_version])
-  if echo "$pgac_bison_version" | $AWK '{ if ([$]4 < 1.875 && [$]4 > 2.9) exit 0; else exit 1;}'
+  if echo "$pgac_bison_version" | $AWK '{ if ([$]4 < 1.875) exit 0; else exit 1;}'
   then
     AC_MSG_WARN([
-*** The installed version of Bison, $BISON, is too old or too new to use with HAWQ.
-*** Bison version between 1.875 and 2.9 is required, but this is $pgac_bison_version.])
+*** The installed version of Bison, $BISON, is too old to use with PostgreSQL.
+*** Bison version 1.875 or later is required, but this is $pgac_bison_version.])
     BISON=""
   fi
 fi
diff --git a/config/tomcat.m4 b/config/tomcat.m4
index f2d744a..9b61ad8 100644
--- a/config/tomcat.m4
+++ b/config/tomcat.m4
@@ -13,6 +13,8 @@ AC_DEFUN([PGAC_CATALINA_HOME],
   dnl /usr/lib/bigtop-tomcat
   if test -x "${CATALINA_HOME}/bin/catalina.sh"; then
     TOMCAT="${CATALINA_HOME}"
+  elif test -x "/usr/local/hawq/tomcat/bin/catalina.sh"; then
+    TOMCAT="/usr/local/hawq/tomcat/"
   elif test -x "/usr/local/Cellar/tomcat@6/6.0.45/libexec/bin/catalina.sh"; then
     TOMCAT="/usr/local/Cellar/tomcat@6/6.0.45/libexec/"
   elif test -x "/usr/lib/bigtop-tomcat/bin/catalina.sh"; then
diff --git a/configure b/configure
index 3ca66a6..ec3303e 100755
--- a/configure
+++ b/configure
@@ -723,9 +723,6 @@ with_zlib
 with_system_tzdata
 with_libhdfs3
 with_libyarn
-with_dbcommon
-with_univplan
-with_storage
 with_openssl
 with_bonjour
 with_ldap
@@ -733,18 +730,18 @@ with_pam
 krb_srvtab
 with_krb5
 with_gssapi
-with_pgcrypto
 with_orc
+with_pgcrypto
 with_r
 with_java
 with_python
 with_perl
 with_tcl
+with_magma_sanitize
 enable_thread_safety
 enable_rps
 enable_orca
 enable_email
-enable_gphdfs
 enable_snmp
 autodepend
 TAS
@@ -770,6 +767,7 @@ GENHTML
 LCOV
 GCOV
 enable_profiling
+enable_avx
 enable_debug
 enable_rpath
 enable_shared
@@ -840,6 +838,7 @@ enable_shared
 enable_rpath
 enable_spinlocks
 enable_debug
+enable_avx
 enable_profiling
 enable_coverage
 enable_dtrace
@@ -851,12 +850,12 @@ enable_debugbreak
 enable_debugntuplestore
 enable_testutils
 enable_snmp
-enable_gphdfs
 enable_email
 enable_orca
 enable_rps
 enable_thread_safety
 enable_thread_safety_force
+with_magma_sanitize
 with_tcl
 with_tclconfig
 with_perl
@@ -875,9 +874,6 @@ with_openssl
 with_readline
 with_libyarn
 with_libhdfs3
-with_dbcommon
-with_univplan
-with_storage
 with_libedit_preferred
 with_system_tzdata
 with_zlib
@@ -1523,6 +1519,7 @@ disable 64-bit integer date/time support
   --disable-rpath         do not embed shared library search path in executables
   --disable-spinlocks     do not use spinlocks
   --enable-debug          build with debugging symbols (-g)
+  --enable-avx            build with vector instruction optimization (-mavx -mavx2)
   --enable-profiling      build with profiling enabled
   --enable-coverage       build with coverage testing instrumentation
   --enable-dtrace         build with DTrace support
@@ -1533,7 +1530,6 @@ disable 64-bit integer date/time support
   --enable-ntuplestore      enable debug_ntuplestore (for debugging)
   --enable-testutils        enable testing utilities
   --enable-snmp        enable snmp for MIB and alerts via TRAP/INFORM
-  --enable-gphdfs        enables GPHDFS support
   --enable-email        enable email alerts
   --enable-orca        enable Pivotal Query Optimizer
   --enable-rps         enable hawq ranger plugin
@@ -1552,6 +1548,7 @@ Optional Packages:
   --with-libraries=DIRS   look for additional libraries in DIRS
   --with-libs=DIRS        alternative spelling of --with-libraries
   --with-pgport=PORTNUM   change default port number [5432]
+  --with-magma_sanitize   build with sanitize check
   --with-tcl              build Tcl modules (PL/Tcl)
   --with-tclconfig=DIR    tclConfig.sh is in DIR
   --with-perl             build Perl modules (PL/Perl)
@@ -1559,6 +1556,7 @@ Optional Packages:
   --with-java             build Java modules (PL/Java)
   --with-r                build R modules (PL/R)
   --with-pgcrypto         build with pgcrypto
+  --with-orc         build with orc format
   --with-gssapi           build with GSSAPI support
   --with-krb5             build with Kerberos 5 support
   --with-krb-srvnam=NAME  default service principal name in Kerberos [postgres]
@@ -1569,9 +1567,6 @@ Optional Packages:
   --without-readline      do not use GNU Readline nor BSD Libedit for editing
   --without-libyarn       do not build libyarn
   --without-libhdfs3      do not build libhdfs3
-  --without-dbcommon      do not build dbcommon
-  --without-univplan      do not build univplan
-  --without-storage       do not build storage
   --without-libedit-preferred  Don't prefer BSD Libedit over GNU Readline
   --with-system-tzdata=DIR  use system time zone data in DIR
   --without-zlib          do not use Zlib
@@ -3457,6 +3452,35 @@ fi
 
 
 #
+# --enable-avx adds vector instruction optimization (-mavx -mavx2) to compiler flags
+#
+
+pgac_args="$pgac_args enable_avx"
+
+# Check whether --enable-avx was given.
+if test "${enable_avx+set}" = set; then :
+  enableval=$enable_avx;
+  case $enableval in
+    yes)
+      :
+      ;;
+    no)
+      :
+      ;;
+    *)
+      as_fn_error $? "no argument expected for --enable-avx option" "$LINENO" 5
+      ;;
+  esac
+
+else
+  enable_avx=yes
+
+fi
+
+
+
+
+#
 # --enable-profiling enables gcc profiling
 #
 
@@ -3496,6 +3520,11 @@ if test "${enable_coverage+set}" = set; then :
   enableval=$enable_coverage;
   case $enableval in
     yes)
+
+$as_echo "#define ENABLE_COVERAGE 1" >>confdefs.h
+
+      ;;
+    no)
       for ac_prog in gcov
 do
   # Extract the first word of "$ac_prog", so it can be a program name with args.
@@ -3632,9 +3661,6 @@ if test -z "$GENHTML"; then
   as_fn_error $? "genhtml not found" "$LINENO" 5
 fi
       ;;
-    no)
-      :
-      ;;
     *)
       as_fn_error $? "no argument expected for --enable-coverage option" "$LINENO" 5
       ;;
@@ -3642,8 +3668,142 @@ fi
 
 else
   enable_coverage=no
+for ac_prog in gcov
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_GCOV+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$GCOV"; then
+  ac_cv_prog_GCOV="$GCOV" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_GCOV="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
 
 fi
+fi
+GCOV=$ac_cv_prog_GCOV
+if test -n "$GCOV"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GCOV" >&5
+$as_echo "$GCOV" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$GCOV" && break
+done
+
+if test -z "$GCOV"; then
+  as_fn_error $? "gcov not found" "$LINENO" 5
+fi
+for ac_prog in lcov
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_LCOV+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$LCOV"; then
+  ac_cv_prog_LCOV="$LCOV" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_LCOV="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+LCOV=$ac_cv_prog_LCOV
+if test -n "$LCOV"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LCOV" >&5
+$as_echo "$LCOV" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$LCOV" && break
+done
+
+if test -z "$LCOV"; then
+  as_fn_error $? "lcov not found" "$LINENO" 5
+fi
+for ac_prog in genhtml
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_GENHTML+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$GENHTML"; then
+  ac_cv_prog_GENHTML="$GENHTML" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_GENHTML="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+GENHTML=$ac_cv_prog_GENHTML
+if test -n "$GENHTML"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GENHTML" >&5
+$as_echo "$GENHTML" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$GENHTML" && break
+done
+
+if test -z "$GENHTML"; then
+  as_fn_error $? "genhtml not found" "$LINENO" 5
+fi
+fi
 
 
 
@@ -4936,6 +5096,12 @@ $as_echo "#define PROFILE_PID_DIR 1" >>confdefs.h
   fi
 fi
 
+# disable avx if --enable-avx=no
+if test "$enable_avx" = no; then
+    CFLAGS="$CFLAGS -mno-avx -mno-avx2"
+    CXXFLAGS="$CXXFLAGS -mno-avx -mno-avx2"
+fi
+
 # We already have this in Makefile.win32, but configure needs it too
 if test "$PORTNAME" = "win32"; then
   CPPFLAGS="$CPPFLAGS -I$srcdir/src/include/port/win32 -DEXEC_BACKEND"
@@ -5141,7 +5307,7 @@ else
     HQ_VERSION="devel"
 fi
 
-GP_VERSION='4.2.0 build 1'
+GP_VERSION='4.2.0'
 GP_VERSION_IN="src/include/catalog/gp_version.in"
 GP_VERSION_HEADER="src/include/catalog/gp_version.h"
 if grep '\$\$' $GP_VERSION_IN > /dev/null 2>&1 ; then
@@ -5156,7 +5322,7 @@ else
 fi
 
 cat >>confdefs.h <<_ACEOF
-#define PG_VERSION_STR "PostgreSQL $PACKAGE_VERSION (Greenplum Database $GP_VERSION) (HAWQ $HQ_VERSION) on $host, compiled by $cc_string"
+#define PG_VERSION_STR "PostgreSQL $PACKAGE_VERSION (Apache HAWQ 3.0.0.0) (Greenplum Database $GP_VERSION) on $host, compiled by $cc_string"
 _ACEOF
 
 
@@ -5421,39 +5587,6 @@ $as_echo "checking whether to build with snmp... $enable_snmp" >&6; }
 
 
 #
-# --enable-gphdfs enables GPHDFS support
-#
-
-pgac_args="$pgac_args enable_gphdfs"
-
-# Check whether --enable-gphdfs was given.
-if test "${enable_gphdfs+set}" = set; then :
-  enableval=$enable_gphdfs;
-  case $enableval in
-    yes)
-
-$as_echo "#define USE_GPHDFS 1" >>confdefs.h
-
-      ;;
-    no)
-      :
-      ;;
-    *)
-      as_fn_error $? "no argument expected for --enable-gphdfs option" "$LINENO" 5
-      ;;
-  esac
-
-else
-  enable_gphdfs=no
-
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: checking whether to build with gphdfs...$enable_gphdfs" >&5
-$as_echo "checking whether to build with gphdfs...$enable_gphdfs" >&6; }
-
-
-#
 # --enable-email enables email alerts
 #
 
@@ -5616,6 +5749,40 @@ $as_echo "$enable_thread_safety" >&6; }
 
 
 #
+# Optionally build Magma Sanitize modules
+#
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build with Magma Sanitize" >&5
+$as_echo_n "checking whether to build with Magma Sanitize... " >&6; }
+
+pgac_args="$pgac_args with_magma_sanitize"
+
+
+# Check whether --with-magma_sanitize was given.
+if test "${with_magma_sanitize+set}" = set; then :
+  withval=$with_magma_sanitize;
+  case $withval in
+    yes)
+      :
+      ;;
+    no)
+      :
+      ;;
+    *)
+      as_fn_error $? "no argument expected for --with-magma_sanitize option" "$LINENO" 5
+      ;;
+  esac
+
+else
+  with_magma_sanitize=no
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_magma_sanitize" >&5
+$as_echo "$with_magma_sanitize" >&6; }
+
+
+#
 # Optionally build Tcl modules (PL/Tcl)
 #
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build with Tcl" >&5
@@ -5846,7 +6013,7 @@ $as_echo "$with_pgcrypto" >&6; }
 
 
 #
-# Optionally build with orc in pluggable storage framework.
+# Optionally build with orc format in pluggable storage framework.
 #
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build orc modules" >&5
 $as_echo_n "checking whether to build orc modules... " >&6; }
@@ -5870,7 +6037,7 @@ if test "${with_orc+set}" = set; then :
   esac
 
 else
-  with_orc=no
+  with_orc=yes
 
 fi
 
@@ -5878,8 +6045,9 @@ fi
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_orc" >&5
 $as_echo "$with_orc" >&6; }
 
-
 #
+
+
 # GSSAPI
 #
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build with GSSAPI support" >&5
@@ -6059,7 +6227,9 @@ $as_echo "#define USE_LDAP 1" >>confdefs.h
   esac
 
 else
-  with_ldap=no
+  with_ldap=yes
+
+$as_echo "#define USE_LDAP 1" >>confdefs.h
 
 fi
 
@@ -6243,95 +6413,6 @@ fi
 
 
 #
-# dbcommon
-#
-
-pgac_args="$pgac_args with_dbcommon"
-
-
-# Check whether --with-dbcommon was given.
-if test "${with_dbcommon+set}" = set; then :
-  withval=$with_dbcommon;
-  case $withval in
-    yes)
-      :
-      ;;
-    no)
-      :
-      ;;
-    *)
-      as_fn_error $? "no argument expected for --with-dbcommon option" "$LINENO" 5
-      ;;
-  esac
-
-else
-  with_dbcommon=yes
-
-fi
-
-
-
-
-#
-# univplan
-#
-
-pgac_args="$pgac_args with_univplan"
-
-
-# Check whether --with-univplan was given.
-if test "${with_univplan+set}" = set; then :
-  withval=$with_univplan;
-  case $withval in
-    yes)
-      :
-      ;;
-    no)
-      :
-      ;;
-    *)
-      as_fn_error $? "no argument expected for --with-univplan option" "$LINENO" 5
-      ;;
-  esac
-
-else
-  with_univplan=yes
-
-fi
-
-
-
-
-#
-# storage
-#
-
-pgac_args="$pgac_args with_storage"
-
-
-# Check whether --with-storage was given.
-if test "${with_storage+set}" = set; then :
-  withval=$with_storage;
-  case $withval in
-    yes)
-      :
-      ;;
-    no)
-      :
-      ;;
-    *)
-      as_fn_error $? "no argument expected for --with-storage option" "$LINENO" 5
-      ;;
-  esac
-
-else
-  with_storage=yes
-
-fi
-
-
-
-#
 # Prefer libedit
 #
 # In GPDB we want the default to be yes, because we don't want to link with GPL code.
@@ -7569,14 +7650,14 @@ if test "$BISON"; then
   pgac_bison_version=`$BISON --version 2>/dev/null | sed q`
   { $as_echo "$as_me:${as_lineno-$LINENO}: using $pgac_bison_version" >&5
 $as_echo "$as_me: using $pgac_bison_version" >&6;}
-  if echo "$pgac_bison_version" | $AWK '{ if ($4 < 1.875 && $4 > 2.9) exit 0; else exit 1;}'
+  if echo "$pgac_bison_version" | $AWK '{ if ($4 < 1.875) exit 0; else exit 1;}'
   then
     { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING:
-*** The installed version of Bison, $BISON, is too old or too new to use with HAWQ.
-*** Bison version between 1.875 and 2.9 is required, but this is $pgac_bison_version." >&5
+*** The installed version of Bison, $BISON, is too old to use with PostgreSQL.
+*** Bison version 1.875 or later is required, but this is $pgac_bison_version." >&5
 $as_echo "$as_me: WARNING:
-*** The installed version of Bison, $BISON, is too old or too new to use with HAWQ.
-*** Bison version between 1.875 and 2.9 is required, but this is $pgac_bison_version." >&2;}
+*** The installed version of Bison, $BISON, is too old to use with PostgreSQL.
+*** Bison version 1.875 or later is required, but this is $pgac_bison_version." >&2;}
     BISON=""
   fi
 fi
@@ -8349,6 +8430,8 @@ $as_echo_n "checking CATALINA_HOME... " >&6; }
 
         if test -x "${CATALINA_HOME}/bin/catalina.sh"; then
     TOMCAT="${CATALINA_HOME}"
+  elif test -x "/usr/local/hawq/tomcat/bin/catalina.sh"; then
+    TOMCAT="/usr/local/hawq/tomcat/"
   elif test -x "/usr/local/Cellar/tomcat@6/6.0.45/libexec/bin/catalina.sh"; then
     TOMCAT="/usr/local/Cellar/tomcat@6/6.0.45/libexec/"
   elif test -x "/usr/lib/bigtop-tomcat/bin/catalina.sh"; then
@@ -9307,6 +9390,7 @@ failure.  It is possible the compiler isn't looking in the proper directory.
 Use --without-zlib to disable zlib support." "$LINENO" 5
 fi
 
+LIBS="-lz -llz4 $LIBS"
 fi
 
 if test "$with_r" = yes; then
@@ -10676,7 +10760,7 @@ See \`config.log' for more details" "$LINENO" 5; }
 fi
 
 ac_ext=cpp
-ac_cpp='$CXXCPP $CPPFLAGS'
+ac_cpp='$CXXCPP $CPPFLAGS -w'
 ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
 ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
 ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
@@ -10691,7 +10775,7 @@ if test "x$ac_cv_header_thrift_protocol_TBinaryProtocol_h" = xyes; then :
 _ACEOF
 
 else
-  as_fn_error $? "thrift is required" "$LINENO" 5
+  : #as_fn_error $? "thrift is required" "$LINENO" 5
 fi
 
 done
@@ -10707,8 +10791,8 @@ if test "x$ac_cv_header_boost_chrono_hpp" = xyes; then :
 _ACEOF
 
 else
-  as_fn_error $? "boost is required.
-Check config.log for details. It is possible the compiler isn't looking in the proper directory." "$LINENO" 5
+  : #as_fn_error $? "boost is required.
+#Check config.log for details. It is possible the compiler isn't looking in the proper directory." "$LINENO" 5
 fi
 
 done
@@ -11736,6 +11820,80 @@ Check config.log for details. It is possible the compiler isn't looking in the p
 fi
 
 
+# uuid
+for ac_header in uuid/uuid.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "uuid/uuid.h" "ac_cv_header_uuid_uuid_h" "$ac_includes_default"
+if test "x$ac_cv_header_uuid_uuid_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_UUID_UUID_H 1
+_ACEOF
+
+else
+  as_fn_error $? "header file <uuid/uuid.h> is required.
+Check config.log for details. It is possible the compiler isn't looking in the proper directory." "$LINENO" 5
+fi
+
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing uuid" >&5
+$as_echo_n "checking for library containing uuid... " >&6; }
+if ${ac_cv_search_uuid+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char uuid ();
+int
+main ()
+{
+return uuid ();
+  ;
+  return 0;
+}
+_ACEOF
+for ac_lib in '' ; do
+  if test -z "$ac_lib"; then
+    ac_res="none required"
+  else
+    ac_res=-l$ac_lib
+    LIBS="-l$ac_lib  $ac_func_search_save_LIBS"
+  fi
+  if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_search_uuid=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext
+  if ${ac_cv_search_uuid+:} false; then :
+  break
+fi
+done
+if ${ac_cv_search_uuid+:} false; then :
+
+else
+  ac_cv_search_uuid=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_uuid" >&5
+$as_echo "$ac_cv_search_uuid" >&6; }
+ac_res=$ac_cv_search_uuid
+if test "$ac_res" != no; then :
+  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+  as_fn_error $? "library 'uuid' is required.
+Check config.log for details. It is possible the compiler isn't looking in the proper directory." "$LINENO" 5
+fi
+
+
 # libxml2
 
 
@@ -15198,7 +15356,6 @@ fi
 fi
 
 
-
 if test x"$HAVE_LONG_LONG_INT_64" = xyes ; then
   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
@@ -16935,7 +17092,11 @@ $as_echo "done" >&6; }
 fi
 
 
-ac_config_files="$ac_config_files GNUmakefile src/VERSIONS.mk depends/thirdparty/googletest/Makefile.global depends/libhdfs3/Makefile.global depends/libyarn/Makefile.global depends/dbcommon/Makefile.global depends/univplan/Makefile.global depends/storage/Makefile.global src/Makefile.global src/pl/pljava/src/java/Makefile.global ranger-plugin/Makefile.global"
+ac_config_files="$ac_config_files GNUmakefile src/VERSIONS.mk src/pl/pljava/src/java/Makefile.global"
+
+ac_config_files="$ac_config_files src/Makefile.global"
+
+ac_config_files="$ac_config_files ranger-plugin/Makefile.global"
 
 
 ac_config_links="$ac_config_links src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION} src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION} src/include/dynloader.h:src/backend/port/dynloader/${template}.h src/include/pg_config_os.h:src/include/port/${template}.h src/Makefile.port:src/makefiles/Makefile.${template}"
@@ -16946,12 +17107,9 @@ ac_config_commands="$ac_config_commands check_win32_symlinks"
 
 fi
 
-# Remove build_timestamp file to make sure rebuild depends lib: libhdfs & libyarn & dbcommon & univplan & storage
+# Remove build_timestamp files to make sure the dependent libs are rebuilt: libhdfs3 & libyarn
 rm -f depends/libhdfs3/build/libhdfs3_build_timestamp
 rm -f depends/libyarn/build/libyarn_build_timestamp
-rm -f depends/dbcommon/build/dbcommon_build_timestamp
-rm -f depends/univplan/build/univplan_build_timestamp
-rm -f depends/storage/build/storage_build_timestamp
 rm -f depends/thirdparty/gporca_build_timestamp
 rm -f depends/thirdparty/gpos_build_timestamp
 rm -f depends/thirdparty/gp-xerces_build_timestamp
@@ -17666,14 +17824,8 @@ do
     "src/backend/port/tas.s") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/tas.s:src/backend/port/tas/${tas_file}" ;;
     "GNUmakefile") CONFIG_FILES="$CONFIG_FILES GNUmakefile" ;;
     "src/VERSIONS.mk") CONFIG_FILES="$CONFIG_FILES src/VERSIONS.mk" ;;
-    "depends/thirdparty/googletest/Makefile.global") CONFIG_FILES="$CONFIG_FILES depends/thirdparty/googletest/Makefile.global" ;;
-    "depends/libhdfs3/Makefile.global") CONFIG_FILES="$CONFIG_FILES depends/libhdfs3/Makefile.global" ;;
-    "depends/libyarn/Makefile.global") CONFIG_FILES="$CONFIG_FILES depends/libyarn/Makefile.global" ;;
-    "depends/dbcommon/Makefile.global") CONFIG_FILES="$CONFIG_FILES depends/dbcommon/Makefile.global" ;;
-    "depends/univplan/Makefile.global") CONFIG_FILES="$CONFIG_FILES depends/univplan/Makefile.global" ;;
-    "depends/storage/Makefile.global") CONFIG_FILES="$CONFIG_FILES depends/storage/Makefile.global" ;;
-    "src/Makefile.global") CONFIG_FILES="$CONFIG_FILES src/Makefile.global" ;;
     "src/pl/pljava/src/java/Makefile.global") CONFIG_FILES="$CONFIG_FILES src/pl/pljava/src/java/Makefile.global" ;;
+    "src/Makefile.global") CONFIG_FILES="$CONFIG_FILES src/Makefile.global" ;;
     "ranger-plugin/Makefile.global") CONFIG_FILES="$CONFIG_FILES ranger-plugin/Makefile.global" ;;
     "src/backend/port/dynloader.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c" ;;
     "src/backend/port/pg_sema.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION}" ;;
diff --git a/configure.in b/configure.in
index 2412a1a..f258699 100644
--- a/configure.in
+++ b/configure.in
@@ -214,6 +214,13 @@ PGAC_ARG_BOOL(enable, debug, no,
 AC_SUBST(enable_debug)
 
 #
+# --enable-avx adds vector instruction optimization (-mavx -mavx2) to compiler flags
+#
+PGAC_ARG_BOOL(enable, avx, yes,
+              [  --enable-avx            build with vector instruction optimization (-mavx -mavx2)])
+AC_SUBST(enable_avx)
+
+#
 # --enable-profiling enables gcc profiling
 #
 PGAC_ARG_BOOL(enable, profiling, no,
@@ -225,6 +232,7 @@ AC_SUBST(enable_profiling)
 #
 PGAC_ARG_BOOL(enable, coverage, no,
               [  --enable-coverage       build with coverage testing instrumentation],
+              [AC_DEFINE([ENABLE_COVERAGE], 1, [Define to 1 to build with coverage enabled. (--enable-coverage)])],
 [AC_CHECK_PROGS(GCOV, gcov)
 if test -z "$GCOV"; then
   AC_MSG_ERROR([gcov not found])
@@ -477,6 +485,12 @@ if test "$enable_profiling" = yes && test "$ac_cv_prog_cc_g" = yes; then
   fi
 fi
 
+# disable avx if --enable-avx=no
+if test "$enable_avx" = no; then
+    CFLAGS="$CFLAGS -mno-avx -mno-avx2"
+    CXXFLAGS="$CXXFLAGS -mno-avx -mno-avx2"
+fi
+
 # We already have this in Makefile.win32, but configure needs it too
 if test "$PORTNAME" = "win32"; then
   CPPFLAGS="$CPPFLAGS -I$srcdir/src/include/port/win32 -DEXEC_BACKEND"
@@ -512,7 +526,7 @@ else
     HQ_VERSION="devel"
 fi
 
-GP_VERSION='4.2.0 build 1'
+GP_VERSION='4.2.0'
 GP_VERSION_IN="src/include/catalog/gp_version.in"
 GP_VERSION_HEADER="src/include/catalog/gp_version.h"
 if grep '\$\$' $GP_VERSION_IN > /dev/null 2>&1 ; then
@@ -526,7 +540,7 @@ else
   cc_string=$CC
 fi
 AC_DEFINE_UNQUOTED(PG_VERSION_STR,
-                   ["PostgreSQL $PACKAGE_VERSION (Greenplum Database $GP_VERSION) (HAWQ $HQ_VERSION) on $host, compiled by $cc_string"],
+                   ["PostgreSQL $PACKAGE_VERSION (Apache HAWQ 3.0.0.0) (Greenplum Database $GP_VERSION) on $host, compiled by $cc_string"],
                    [A string containing the version number, platform, and C compiler])
 
 if `echo "$HQ_VERSION" | grep "main build" > /dev/null 2>&1` ; then
@@ -618,15 +632,6 @@ AC_MSG_RESULT([checking whether to build with snmp... $enable_snmp])
 AC_SUBST(enable_snmp)
 
 #
-# --enable-gphdfs enables GPHDFS support
-#
-PGAC_ARG_BOOL(enable, gphdfs, no, [  --enable-gphdfs        enables GPHDFS support],
-              [AC_DEFINE([USE_GPHDFS], 1, 
-                         [Define to 1 to build with gphdfs capabilities. (--enable-gphdfs)])])
-AC_MSG_RESULT([checking whether to build with gphdfs...$enable_gphdfs])
-AC_SUBST(enable_gphdfs)
-
-#
 # --enable-email enables email alerts
 #
 PGAC_ARG_BOOL(enable, email, no,  [  --enable-email        enable email alerts],
@@ -670,6 +675,14 @@ AC_MSG_RESULT([$enable_thread_safety])
 AC_SUBST(enable_thread_safety)
 
 #
+# Optionally build Magma Sanitize modules
+#
+AC_MSG_CHECKING([whether to build with Magma Sanitize])
+PGAC_ARG_BOOL(with, magma_sanitize, no, [  --with-magma_sanitize   build with sanitize check])
+AC_MSG_RESULT([$with_magma_sanitize])
+AC_SUBST([with_magma_sanitize])
+
+#
 # Optionally build Tcl modules (PL/Tcl)
 #
 AC_MSG_CHECKING([whether to build with Tcl])
@@ -731,7 +744,6 @@ AC_MSG_RESULT([$with_orc])
 AC_SUBST(with_orc)
 #
 
-#
 # GSSAPI
 #
 AC_MSG_CHECKING([whether to build with GSSAPI support])
@@ -785,7 +797,7 @@ AC_SUBST(with_pam)
 # LDAP
 #
 AC_MSG_CHECKING([whether to build with LDAP support])
-PGAC_ARG_BOOL(with, ldap, no,
+PGAC_ARG_BOOL(with, ldap, yes,
               [  --with-ldap             build with LDAP support],
               [AC_DEFINE([USE_LDAP], 1, [Define to 1 to build with LDAP support. (--with-ldap)])])
 AC_MSG_RESULT([$with_ldap])
@@ -843,27 +855,6 @@ PGAC_ARG_BOOL(with, libhdfs3, yes,
 AC_SUBST(with_libhdfs3)
 
 #
-# dbcommon
-#
-PGAC_ARG_BOOL(with, dbcommon, yes,
-              [  --without-dbcommon      do not build dbcommon])
-AC_SUBST(with_dbcommon)
-
-#
-# univplan
-#
-PGAC_ARG_BOOL(with, univplan, yes,
-              [  --without-univplan      do not build univplan])
-AC_SUBST(with_univplan)
-
-#
-# storage
-#
-PGAC_ARG_BOOL(with, storage, yes,
-              [  --without-storage      do not build storage])
-AC_SUBST(with_storage)
-
-#
 # Prefer libedit
 #
 # In GPDB we want the default to be yes, because we don't want to link with GPL code.
@@ -1106,6 +1097,7 @@ if test "$with_zlib" = yes; then
 If you have zlib already installed, see config.log for details on the
 failure.  It is possible the compiler isn't looking in the proper directory.
 Use --without-zlib to disable zlib support.])])
+LIBS="-lz -llz4 $LIBS"
 fi
 
 if test "$with_r" = yes; then
@@ -1446,6 +1438,12 @@ Check config.log for details. It is possible the compiler isn't looking in the p
 AC_SEARCH_LIBS(json_tokener_parse, json-c json, [], [AC_MSG_ERROR([library 'json-c' is required.
 Check config.log for details. It is possible the compiler isn't looking in the proper directory.])], [])
 
+# uuid 
+AC_CHECK_HEADERS(uuid/uuid.h, [], [AC_MSG_ERROR([header file <uuid/uuid.h> is required.
+Check config.log for details. It is possible the compiler isn't looking in the proper directory.])])
+AC_SEARCH_LIBS(uuid, [], [AC_MSG_ERROR([library 'uuid' is required.
+Check config.log for details. It is possible the compiler isn't looking in the proper directory.])], [])
+
 # libxml2
 AM_PATH_XML2([2.7.6], [], [AC_MSG_ERROR([libxml2 is required.
 Check config.log for details. It is possible the compiler isn't looking in the proper directory.])])
@@ -2206,7 +2204,9 @@ else
 fi
 AC_SUBST(vpath_build)
 
-AC_CONFIG_FILES([GNUmakefile src/VERSIONS.mk depends/thirdparty/googletest/Makefile.global depends/libhdfs3/Makefile.global depends/libyarn/Makefile.global depends/dbcommon/Makefile.global depends/univplan/Makefile.global depends/storage/Makefile.global src/Makefile.global src/pl/pljava/src/java/Makefile.global ranger-plugin/Makefile.global])
+AC_CONFIG_FILES([GNUmakefile src/VERSIONS.mk src/pl/pljava/src/java/Makefile.global])
+AC_CONFIG_FILES([src/Makefile.global])
+AC_CONFIG_FILES([ranger-plugin/Makefile.global])
 
 AC_CONFIG_LINKS([
   src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c
@@ -2229,12 +2229,9 @@ for FILE in $CONFIG_LINKS
 ])
 fi
 
-# Remove build_timestamp file to make sure rebuild depends lib: libhdfs & libyarn & dbcommon & univplan & storage
+# Remove build_timestamp files to make sure the dependent libs are rebuilt: libhdfs3 & libyarn
 rm -f depends/libhdfs3/build/libhdfs3_build_timestamp
 rm -f depends/libyarn/build/libyarn_build_timestamp
-rm -f depends/dbcommon/build/dbcommon_build_timestamp
-rm -f depends/univplan/build/univplan_build_timestamp
-rm -f depends/storage/build/storage_build_timestamp
 rm -f depends/thirdparty/gporca_build_timestamp
 rm -f depends/thirdparty/gpos_build_timestamp
 rm -f depends/thirdparty/gp-xerces_build_timestamp
diff --git a/contrib/Makefile b/contrib/Makefile
index 6490c23..9320d91 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -9,7 +9,9 @@ WANTED_DIRS = \
 		extprotocol \
 		gp_cancel_query \
 		formatter_fixedwidth \
-		exthdfs 
+		exthdfs \
+		exthive \
+		extfmtcsv
 
 ifeq ($(with_pgcrypto), yes)
 WANTED_DIRS += pgcrypto
diff --git a/contrib/extfmtcsv/Makefile b/contrib/extfmtcsv/Makefile
new file mode 100644
index 0000000..f188831
--- /dev/null
+++ b/contrib/extfmtcsv/Makefile
@@ -0,0 +1,15 @@
+MODULE_big = extfmtcsv
+OBJS       = extfmtcsv.o
+
+ifdef USE_PGXS
+PGXS := $(shell pg_config --pgxs)
+include $(PGXS)
+else
+subdir = contrib/extfmtcsv
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+
+override CFLAGS += -ljson-c -luuid
+
+endif
diff --git a/contrib/extfmtcsv/extfmtcsv.c b/contrib/extfmtcsv/extfmtcsv.c
new file mode 100644
index 0000000..39d55a8
--- /dev/null
+++ b/contrib/extfmtcsv/extfmtcsv.c
@@ -0,0 +1,676 @@
+#include <json-c/json.h>
+
+#include "postgres.h"
+#include "fmgr.h"
+#include "funcapi.h"
+#include "c.h"
+#include "access/filesplit.h"
+#include "utils/uri.h"
+#include "cdb/cdbvars.h"
+#include "cdb/cdbfilesystemcredential.h"
+
+#include "storage/cwrapper/hdfs-file-system-c.h"
+#include "storage/cwrapper/text-format-c.h"
+
+/* Do the module magic dance */
+PG_MODULE_MAGIC
+;
+PG_FUNCTION_INFO_V1(extfmtcsv_out);
+PG_FUNCTION_INFO_V1(extfmtcsv_in);
+PG_FUNCTION_INFO_V1(extfmttext_out);
+PG_FUNCTION_INFO_V1(extfmttext_in);
+
+Datum extfmtcsv_out(PG_FUNCTION_ARGS);
+Datum extfmtcsv_in(PG_FUNCTION_ARGS);
+Datum extfmttext_out(PG_FUNCTION_ARGS);
+Datum extfmttext_in(PG_FUNCTION_ARGS);
+
+typedef struct FmtUserData
+{
+	TextFormatC *fmt;
+	char **colNames;
+	int numberOfColumns;
+	char **colRawValues;
+	Datum *colValues;
+	uint64_t *colValLength;
+	bool *colIsNulls;
+	bool *colToReads;
+
+	int nSplits;
+	TextFormatFileSplit *splits;
+} FmtUserData;
+
+char externalFmtType = '\0';
+char externalFmtNameIn[64];
+char externalFmtNameOut[64];
+
+void setExtFormatterTupleDesc(TextFormatC *fmt, TupleDesc tupdesc);
+void buildFormatterOptionsInJson(PG_FUNCTION_ARGS, char **jsonStr);
+void beginFormatterForRead(PG_FUNCTION_ARGS);
+void beginFormatterForWrite(PG_FUNCTION_ARGS);
+
+Datum extfmtcommon_in(PG_FUNCTION_ARGS);
+Datum extfmtcommon_out(PG_FUNCTION_ARGS);
+
+Datum extfmtcommon_in(PG_FUNCTION_ARGS)
+{
+	HeapTuple tuple; /* The result tuple to return at last */
+	TupleDesc tupdesc;
+	MemoryContext oldMemCtx = NULL;
+
+	/* Must be called via the external table format manager */
+	if (!CALLED_AS_FORMATTER(fcinfo))
+		ereport(ERROR,
+				(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), errmsg("%s: not called by format manager", externalFmtNameIn)));
+
+	/* Check if this is the first time calling the formatter */
+	if (FORMATTER_GET_MASK(fcinfo) == FMT_UNSET)
+	{
+		FORMATTER_GET_MASK(fcinfo) = FMT_SET;
+		externalFmtType = '\0';
+		PG_RETURN_VOID() ;
+	}
+
+	if (((FormatterData *) (fcinfo->context))->fmt_splits == NULL)
+	{
+		FORMATTER_RETURN_NOTIFICATION(fcinfo, FMT_DONE);
+		PG_RETURN_VOID() ;
+	}
+
+	tupdesc = FORMATTER_GET_TUPDESC(fcinfo);
+
+	/* Check if the user data was created ever */
+	FmtUserData *userData = FORMATTER_GET_USER_CTX(fcinfo);
+	if (userData == NULL)
+	{
+		/* Create user data instance and set in context to keep it */
+		userData = palloc0(sizeof(FmtUserData));
+		FORMATTER_SET_USER_CTX(fcinfo, userData);
+		userData->numberOfColumns = tupdesc->natts;
+		userData->colNames = palloc0(
+				sizeof(char *) * userData->numberOfColumns);
+		userData->colValues = palloc0(
+				sizeof(Datum) * userData->numberOfColumns);
+		userData->colIsNulls = palloc0(
+				sizeof(bool) * userData->numberOfColumns);
+		userData->colRawValues = palloc0(
+				sizeof(char *) * userData->numberOfColumns);
+		userData->colValLength = palloc0(
+				sizeof(uint64_t) * userData->numberOfColumns);
+
+		/* Prepare formatter options */
+		char *fmtOptions = NULL;
+		buildFormatterOptionsInJson(fcinfo, &fmtOptions);
+
+		/* Create formatter instance */
+		userData->fmt = TextFormatNewTextFormatC(externalFmtType, fmtOptions);
+		/* Begin scanning by passing in split and column setting */
+		beginFormatterForRead(fcinfo);
+
+		if (fmtOptions != NULL)
+		{
+			pfree(fmtOptions);
+		}
+	}
+	bool lastBatchRow = false;
+	bool res = TextFormatNextTextFormatC(userData->fmt, userData->colRawValues,
+			userData->colValLength, userData->colIsNulls, &lastBatchRow);
+
+	if (res)
+	{
+		MemoryContext m = FORMATTER_GET_PER_ROW_MEM_CTX(fcinfo);
+		MemoryContext oldcontext = MemoryContextSwitchTo(m);
+
+		/* We have one tuple ready */
+		for (int i = 0; i < userData->numberOfColumns; ++i)
+		{
+			if (userData->colIsNulls[i])
+			{
+				continue;
+			}
+
+			/* Prepare the tuple to return. */
+			if (!((FormatterData *) (fcinfo->context))->fmt_needs_transcoding)
+			{
+				if (!lastBatchRow)
+				{
+					char *val = (char *) (userData->colRawValues[i]);
+					char oldc = *(val + userData->colValLength[i]);
+					*(val + userData->colValLength[i]) = '\0';
+					userData->colValues[i] = InputFunctionCall(
+							&(FORMATTER_GET_CONVERSION_FUNCS(fcinfo)[i]), val,
+							FORMATTER_GET_TYPIOPARAMS(fcinfo)[i],
+							tupdesc->attrs[i]->atttypmod);
+					*(val + userData->colValLength[i]) = oldc;
+				}
+				else
+				{
+					char *val = (char *) palloc(userData->colValLength[i] + 1);
+					memcpy(val, userData->colRawValues[i],
+							userData->colValLength[i]);
+					val[userData->colValLength[i]] = '\0';
+					userData->colValues[i] = InputFunctionCall(
+							&(FORMATTER_GET_CONVERSION_FUNCS(fcinfo)[i]), val,
+							FORMATTER_GET_TYPIOPARAMS(fcinfo)[i],
+							tupdesc->attrs[i]->atttypmod);
+				}
+			}
+			else
+			{
+				char *cvt = NULL;
+				if (!lastBatchRow)
+				{
+					char *val = (char *) (userData->colRawValues[i]);
+					char oldc = *(val + userData->colValLength[i]);
+					*(val + userData->colValLength[i]) = '\0';
+					FORMATTER_ENCODE_STRING(fcinfo, val, userData->colValLength[i],
+							cvt, true); /* is import */
+					Assert(cvt != NULL);
+					userData->colValues[i] = InputFunctionCall(
+							&(FORMATTER_GET_CONVERSION_FUNCS(fcinfo)[i]), cvt,
+							FORMATTER_GET_TYPIOPARAMS(fcinfo)[i],
+							tupdesc->attrs[i]->atttypmod);
+					*(val + userData->colValLength[i]) = oldc;
+				}
+				else
+				{
+					char *val = (char *) palloc(userData->colValLength[i] + 1);
+					memcpy(val, userData->colRawValues[i],
+							userData->colValLength[i]);
+					val[userData->colValLength[i]] = '\0';
+					FORMATTER_ENCODE_STRING(fcinfo, val, userData->colValLength[i],
+							cvt, true); /* is import */
+					Assert(cvt != NULL);
+					userData->colValues[i] = InputFunctionCall(
+							&(FORMATTER_GET_CONVERSION_FUNCS(fcinfo)[i]), cvt,
+							FORMATTER_GET_TYPIOPARAMS(fcinfo)[i],
+							tupdesc->attrs[i]->atttypmod);
+				}
+			}
+		}
+		MemoryContextSwitchTo(oldcontext);
+
+		TextFormatCompleteNextTextFormatC(userData->fmt);
+		tuple = heap_form_tuple(tupdesc, userData->colValues,
+				userData->colIsNulls);
+		FORMATTER_SET_TUPLE(fcinfo, tuple);
+		FORMATTER_RETURN_TUPLE(tuple);
+	}
+	else
+	{
+		externalFmtType = '\0';
+		TextFormatCompleteNextTextFormatC(userData->fmt);
+		/* If there is no error caught, it should be an end of reading split */
+		TextFormatCatchedError *err = TextFormatGetErrorTextFormatC(
+				userData->fmt);
+		if (err->errCode == ERRCODE_SUCCESSFUL_COMPLETION)
+		{
+			TextFormatEndTextFormatC(userData->fmt);
+			err = TextFormatGetErrorTextFormatC(userData->fmt);
+			if (err->errCode != ERRCODE_SUCCESSFUL_COMPLETION)
+			{
+				elog(ERROR, "%s: failed to get next tuple. %s (%d)",
+				externalFmtNameIn,
+				err->errMessage, err->errCode);
+			}
+			TextFormatFreeTextFormatC(&(userData->fmt));
+			pfree(userData->colIsNulls);
+			pfree(userData->colRawValues);
+			pfree(userData->colValues);
+			pfree(userData->colToReads);
+			pfree(userData->colValLength);
+			if (userData->splits != NULL)
+			{
+				for (int i = 0; i < userData->nSplits; ++i)
+				{
+					pfree(userData->splits[i].fileName);
+				}
+				pfree(userData->splits);
+			}
+			for (int i = 0; i < userData->numberOfColumns; ++i)
+			{
+				pfree(userData->colNames[i]);
+			}
+			pfree(userData->colNames);
+			pfree(userData);
+			FORMATTER_RETURN_NOTIFICATION(fcinfo, FMT_DONE);
+		}
+		else
+		{
+			elog(ERROR, "%s: failed to get next tuple. %s (%d)",
+			externalFmtNameIn,
+			err->errMessage, err->errCode);
+		}
+	}
+	PG_RETURN_VOID();
+}
+
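
The read path above avoids copying field data for every row: as long as the row is not the last one of a batch, it temporarily overwrites the byte just past the field with a NUL terminator, runs the column's input function directly on the raw buffer, and then restores the saved byte (the oldc variable). Only when lastBatchRow is set is the field copied into a freshly palloc'd buffer, presumably because the byte after the final field may not be safe to touch. A minimal standalone sketch of that save/restore pattern (editorial illustration only, not part of the patch, with strtol standing in for InputFunctionCall):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* one raw row buffer; the first field is "1234" (4 bytes long) */
    char row[] = "1234,hello";
    char *field = row;
    size_t fieldLen = 4;

    /* temporarily NUL-terminate the field in place, parse it, then restore */
    char saved = field[fieldLen];
    field[fieldLen] = '\0';
    long value = strtol(field, NULL, 10);
    field[fieldLen] = saved;

    printf("parsed %ld, buffer intact: %s\n", value, row);  /* 1234, "1234,hello" */
    return 0;
}
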
+/*
+ * extfmtcsv_in. each time this function is called, it builds one tuple from
+ * the input data buffer.
+ */
+Datum extfmtcsv_in(PG_FUNCTION_ARGS)
+{
+	if (externalFmtType == '\0')
+	{
+		externalFmtType = TextFormatTypeCSV;
+		strcpy(externalFmtNameIn, "csv_in");
+		strcpy(externalFmtNameOut, "csv_out");
+	}
+	return extfmtcommon_in(fcinfo);
+}
+
+Datum extfmttext_in(PG_FUNCTION_ARGS)
+{
+	if (externalFmtType == '\0')
+	{
+		externalFmtType = TextFormatTypeTXT;
+		strcpy(externalFmtNameIn, "text_in");
+		strcpy(externalFmtNameOut, "text_out");
+	}
+	return extfmtcommon_in(fcinfo);
+}
+
+Datum extfmtcommon_out(PG_FUNCTION_ARGS)
+{
+	static char DUMMY[1] = "";
+	TupleDesc tupdesc = NULL;
+	HeapTupleData tuple;
+
+	/* Must be called via the external table format manager */
+	if (!CALLED_AS_FORMATTER(fcinfo))
+		ereport(ERROR,
+				(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), errmsg("%s: not called by format manager", externalFmtNameOut)));
+
+	/* Check if this is the first time calling the formatter */
+	if (FORMATTER_GET_MASK(fcinfo) == FMT_UNSET)
+	{
+		FORMATTER_GET_MASK(fcinfo) = FMT_SET;
+		externalFmtType = '\0';
+		PG_RETURN_VOID();
+	}
+
+	/* Get tuple desc */
+	tupdesc = FORMATTER_GET_TUPDESC(fcinfo);
+
+	/* Get our internal description of the formatter */
+
+	FmtUserData *userData = FORMATTER_GET_USER_CTX(fcinfo);
+	if (userData == NULL)
+	{
+		userData = palloc0(sizeof(FmtUserData));
+		FORMATTER_SET_USER_CTX(fcinfo, userData);
+		userData->numberOfColumns = tupdesc->natts;
+		userData->colValues = palloc0(
+				sizeof(Datum) * userData->numberOfColumns);
+		userData->colIsNulls = palloc0(
+				sizeof(bool) * userData->numberOfColumns);
+		userData->colRawValues = palloc0(
+				sizeof(char *) * userData->numberOfColumns);
+		userData->colNames = palloc0(
+				sizeof(char *) * userData->numberOfColumns);
+		/* Prepare formatter options */
+		char *fmtOptions = NULL;
+		buildFormatterOptionsInJson(fcinfo, &fmtOptions);
+
+		/* Create formatter instance */
+		userData->fmt = TextFormatNewTextFormatC(externalFmtType, fmtOptions);
+
+		if (fmtOptions != NULL)
+		{
+			pfree(fmtOptions);
+		}
+		/* Begin scanning by passing in split and column setting */
+		beginFormatterForWrite(fcinfo);
+	}
+
+	if (FORMATTER_GET_MASK(fcinfo) & FMT_WRITE_END)
+	{
+		externalFmtType = '\0';
+		TextFormatEndInsertTextFormatC(userData->fmt);
+		TextFormatCatchedError *err = TextFormatGetErrorTextFormatC(
+				userData->fmt);
+		if (err->errCode != ERRCODE_SUCCESSFUL_COMPLETION)
+		{
+			elog(ERROR, "%s: failed to insert: %s(%d)",
+			externalFmtNameOut,
+			err->errMessage, err->errCode);
+		}
+
+		TextFormatFreeTextFormatC(&(userData->fmt));
+		pfree(userData->colIsNulls);
+		pfree(userData->colRawValues);
+		pfree(userData->colValues);
+		for (int i = 0; i < userData->numberOfColumns; ++i)
+		{
+			pfree(userData->colNames[i]);
+		}
+		pfree(userData->colNames);
+		pfree(userData);
+		PG_RETURN_VOID();
+	}
+
+	/* break the input tuple into fields */
+	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
+	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
+	ItemPointerSetInvalid(&(tuple.t_self));
+	tuple.t_data = rec;
+	heap_deform_tuple(&tuple, tupdesc, userData->colValues,
+			userData->colIsNulls);
+
+	MemoryContext m = FORMATTER_GET_PER_ROW_MEM_CTX(fcinfo);
+	MemoryContext oldcontext = MemoryContextSwitchTo(m);
+	/* convert to string */
+	for (int i = 0; i < userData->numberOfColumns; ++i)
+	{
+		userData->colRawValues[i] = DUMMY;
+		if (userData->colIsNulls[i])
+		{
+			continue;
+		}
+		userData->colRawValues[i] = OutputFunctionCall(
+				&(FORMATTER_GET_CONVERSION_FUNCS(fcinfo)[i]),
+				userData->colValues[i]);
+		if (((FormatterData *) (fcinfo->context))->fmt_needs_transcoding)
+		{
+			char *cvt = NULL;
+			FORMATTER_ENCODE_STRING(fcinfo,
+					(char * )(userData->colRawValues[i]),
+					strlen(userData->colRawValues[i]), cvt, false); /* is export */
+			userData->colRawValues[i] = cvt;
+		}
+	}
+
+	/* pass to formatter to output */
+	TextFormatInsertTextFormatC(userData->fmt, userData->colRawValues,
+			userData->colIsNulls);
+	TextFormatCatchedError *e = TextFormatGetErrorTextFormatC(userData->fmt);
+	if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION)
+	{
+		elog(ERROR, "%s: failed to insert: %s(%d)",
+		externalFmtNameOut,
+		e->errMessage, e->errCode);
+	}
+
+	MemoryContextSwitchTo(oldcontext);
+	PG_RETURN_VOID();
+}
+
+Datum extfmtcsv_out(PG_FUNCTION_ARGS)
+{
+	if (externalFmtType == '\0')
+	{
+		externalFmtType = TextFormatTypeCSV;
+		strcpy(externalFmtNameIn, "csv_in");
+		strcpy(externalFmtNameOut, "csv_out");
+	}
+	return extfmtcommon_out(fcinfo);
+}
+
+Datum extfmttext_out(PG_FUNCTION_ARGS)
+{
+	if (externalFmtType == '\0')
+	{
+		externalFmtType = TextFormatTypeTXT;
+		strcpy(externalFmtNameIn, "text_in");
+		strcpy(externalFmtNameOut, "text_out");
+	}
+	return extfmtcommon_out(fcinfo);
+}
+
+void buildFormatterOptionsInJson(PG_FUNCTION_ARGS, char **jsonStr)
+{
+	struct json_object *optJsonObject = json_object_new_object();
+	/* add those predefined */
+	char *keyStr = NULL;
+	char *valStr = NULL;
+	int nArgs = FORMATTER_GET_NUM_ARGS(fcinfo);
+	for (int i = 1; i <= nArgs; ++i)
+	{
+		keyStr = FORMATTER_GET_NTH_ARG_KEY(fcinfo, i);
+		valStr = FORMATTER_GET_NTH_ARG_VAL(fcinfo, i);
+		/* convert the delimiter and null to external table encoding */
+		valStr = pg_do_encoding_conversion(valStr, strlen(valStr),
+				GetDatabaseEncoding(),
+				((FormatterData*) fcinfo->context)->fmt_external_encoding);
+
+		if (strcmp(keyStr, "reject_limit") == 0)
+		{
+			json_object_object_add(optJsonObject, "reject_limit",
+					json_object_new_int(atoi(valStr)));
+		}
+		else if (strcmp(keyStr, "force_notnull") == 0
+				|| strcmp(keyStr, "force_quote") == 0)
+		{
+			/* the formatter expects comma-separated column names, so turn dots into commas */
+			int l = strlen(valStr);
+			for (int i = 0; i < l; ++i)
+			{
+				if (valStr[i] == '.')
+					valStr[i] = ',';
+			}
+			json_object_object_add(optJsonObject, keyStr,
+					json_object_new_string(valStr));
+		}
+		else
+		{
+			json_object_object_add(optJsonObject, keyStr,
+					json_object_new_string(valStr));
+		}
+	}
+
+	/* add default settings for this formatter */
+	if (json_object_object_get(optJsonObject, "delimiter") == NULL)
+	{
+		json_object_object_add(optJsonObject, "delimiter",
+				json_object_new_string(
+						(externalFmtType == TextFormatTypeTXT) ? "\t" : ","));
+	}
+
+	if (json_object_object_get(optJsonObject, "null") == NULL)
+	{
+		json_object_object_add(optJsonObject, "null",
+				json_object_new_string(
+						(externalFmtType == TextFormatTypeTXT) ? "\\N" : ""));
+	}
+
+	if (json_object_object_get(optJsonObject, "fill_missing_fields") == NULL)
+	{
+		json_object_object_add(optJsonObject, "fill_missing_fields",
+				json_object_new_boolean(0));
+	}
+	else
+	{
+		json_object_object_del(optJsonObject, "fill_missing_fields");
+		json_object_object_add(optJsonObject, "fill_missing_fields",
+				json_object_new_boolean(1));
+	}
+
+	if (json_object_object_get(optJsonObject, "header") == NULL)
+	{
+		json_object_object_add(optJsonObject, "header",
+				json_object_new_boolean(0));
+	}
+	else
+	{
+		json_object_object_del(optJsonObject, "header");
+		json_object_object_add(optJsonObject, "header",
+				json_object_new_boolean(1));
+	}
+
+	if (json_object_object_get(optJsonObject, "reject_limit") == NULL)
+	{
+		json_object_object_add(optJsonObject, "reject_limit",
+				json_object_new_int(0));
+	}
+
+	if (json_object_object_get(optJsonObject, "err_table") == NULL)
+	{
+		json_object_object_add(optJsonObject, "err_table",
+				json_object_new_string(""));
+	}
+
+	if (json_object_object_get(optJsonObject, "newline") == NULL)
+	{
+		json_object_object_add(optJsonObject, "newline",
+				json_object_new_string("lf"));
+	}
+
+	if (json_object_object_get(optJsonObject, "encoding") == NULL)
+	{
+		const char *encodingStr = pg_encoding_to_char(
+				((FormatterData*) fcinfo->context)->fmt_external_encoding);
+		char lowerCaseEncodingStr[64];
+		strcpy(lowerCaseEncodingStr, encodingStr);
+		for (char *p = lowerCaseEncodingStr; *p != '\0'; ++p)
+		{
+			*p = tolower(*p);
+		}
+
+		json_object_object_add(optJsonObject, "encoding",
+				json_object_new_string(lowerCaseEncodingStr));
+	}
+
+	if (externalFmtType == TextFormatTypeCSV
+			&& json_object_object_get(optJsonObject, "quote") == NULL)
+	{
+		json_object_object_add(optJsonObject, "quote",
+				json_object_new_string("\""));
+	}
+
+	if (json_object_object_get(optJsonObject, "escape") == NULL)
+	{
+		if (externalFmtType == TextFormatTypeCSV)
+		{
+			/* Let escape follow quote's setting */
+			struct json_object *val = json_object_object_get(optJsonObject,
+					"quote");
+			json_object_object_add(optJsonObject, "escape",
+					json_object_new_string(json_object_get_string(val)));
+		}
+		else
+		{
+			json_object_object_add(optJsonObject, "escape",
+					json_object_new_string("\\"));
+		}
+	}
+
+	if (json_object_object_get(optJsonObject, "force_quote") == NULL)
+	{
+		json_object_object_add(optJsonObject, "force_quote",
+				json_object_new_string(""));
+	}
+
+	/* This is for csv formatter only */
+	if (externalFmtType == TextFormatTypeCSV
+			&& json_object_object_get(optJsonObject, "force_notnull") == NULL)
+	{
+		json_object_object_add(optJsonObject, "force_notnull",
+				json_object_new_string(""));
+	}
+
+	*jsonStr = NULL;
+	if (optJsonObject != NULL)
+	{
+		const char *str = json_object_to_json_string(optJsonObject);
+		*jsonStr = (char *) palloc0(strlen(str) + 1);
+		strcpy(*jsonStr, str);
+		json_object_put(optJsonObject);
+		// jsonStr is already in table encoding, elog needs db encoding
+		// elog(LOG, "formatter options are %s", *jsonStr);
+	}
+}
+
+void beginFormatterForRead(PG_FUNCTION_ARGS)
+{
+	FmtUserData *userData = FORMATTER_GET_USER_CTX(fcinfo);
+	FormatterData *fmtData = (FormatterData *) (fcinfo->context);
+
+	/* parse URL to get server location etc. */
+	Uri *uri = ParseExternalTableUri(fmtData->fmt_url);
+	userData->nSplits = list_length(fmtData->fmt_splits);
+	userData->splits = palloc0(sizeof(TextFormatFileSplit) * userData->nSplits);
+	ListCell *cell = NULL;
+	int i = 0;
+	foreach(cell, fmtData->fmt_splits)
+	{
+		FileSplit origFS = (FileSplit) lfirst(cell);
+		userData->splits[i].len = origFS->lengths;
+		userData->splits[i].start = origFS->offsets;
+
+		/* build file path containing host address */
+		int fileNameLen = 7 +   // "hdfs://"
+		                  (uri->hostname == NULL ? 0 : strlen(uri->hostname)) +
+		                  1 +   // ':'
+		                  5 +   // "65535"
+		                  (origFS->ext_file_uri_string == NULL ? 0 : strlen(origFS->ext_file_uri_string)) +
+		                  1;    // '\0'
+
+		userData->splits[i].fileName = palloc(fileNameLen * sizeof(char));
+		sprintf(userData->splits[i].fileName, "hdfs://%s:%d%s",
+		        uri->hostname == NULL ? "" : uri->hostname, uri->port,
+		        origFS->ext_file_uri_string == NULL ? "" : origFS->ext_file_uri_string);
+		i++;
+	}
+
+	if (enable_secure_filesystem && Gp_role == GP_ROLE_EXECUTE)
+	{
+		char *token = find_filesystem_credential_with_uri(fmtData->fmt_url);
+		SetToken(fmtData->fmt_url, token);
+	}
+
+	FreeExternalTableUri(uri);
+
+	userData->colToReads = palloc0(sizeof(bool) * userData->numberOfColumns);
+	for (int i = 0; i < userData->numberOfColumns; ++i)
+	{
+		userData->colToReads[i] = true;
+		/* 64 is the name type length */
+		userData->colNames[i] = palloc(sizeof(char) * 64);
+		strcpy(userData->colNames[i],
+				fmtData->fmt_relation->rd_att->attrs[i]->attname.data);
+	}
+
+	TextFormatBeginTextFormatC(userData->fmt, userData->splits,
+			userData->nSplits, userData->colToReads, userData->colNames,
+			userData->numberOfColumns);
+	TextFormatCatchedError *e = TextFormatGetErrorTextFormatC(userData->fmt);
+	if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION)
+	{
+		elog(ERROR, "%s: failed to begin scan: %s(%d)",
+		externalFmtNameIn,
+		e->errMessage, e->errCode);
+	}
+}
+
+void beginFormatterForWrite(PG_FUNCTION_ARGS)
+{
+	FmtUserData *userData = FORMATTER_GET_USER_CTX(fcinfo);
+	FormatterData *fmtData = (FormatterData *) (fcinfo->context);
+
+	/* prepare column names */
+	for (int i = 0; i < userData->numberOfColumns; ++i)
+	{
+		/* 64 is the name type length */
+		userData->colNames[i] = palloc(sizeof(char) * 64);
+		strcpy(userData->colNames[i],
+				fmtData->fmt_relation->rd_att->attrs[i]->attname.data);
+	}
+
+	if (enable_secure_filesystem && Gp_role == GP_ROLE_EXECUTE)
+	{
+		char *token = find_filesystem_credential_with_uri(fmtData->fmt_url);
+		SetToken(fmtData->fmt_url, token);
+	}
+
+	TextFormatBeginInsertTextFormatC(userData->fmt, fmtData->fmt_url,
+			userData->colNames, userData->numberOfColumns);
+
+	TextFormatCatchedError *e = TextFormatGetErrorTextFormatC(userData->fmt);
+	if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION)
+	{
+		elog(ERROR, "%s: failed to begin insert: %s(%d)",
+		externalFmtNameOut,
+		e->errMessage, e->errCode);
+	}
+}
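
buildFormatterOptionsInJson above reduces to a handful of json-c calls: create an object, add the user-supplied options, fill in a default only when a key is absent, serialize, and drop the reference. The serialized string is owned by the object, which is why the function pallocs its own copy before calling json_object_put. A minimal standalone sketch of the same pattern (editorial illustration, not part of the patch; the header path and the -ljson-c link flag are assumptions that vary by install):

#include <stdio.h>

#include <json-c/json.h>    /* may be <json/json.h> on older installs */

int main(void)
{
    struct json_object *opts = json_object_new_object();

    /* an option supplied by the user in the external table DDL */
    json_object_object_add(opts, "delimiter", json_object_new_string("|"));

    /* add defaults only for keys the user did not set, as the formatter does */
    if (json_object_object_get(opts, "null") == NULL)
        json_object_object_add(opts, "null", json_object_new_string(""));
    if (json_object_object_get(opts, "header") == NULL)
        json_object_object_add(opts, "header", json_object_new_boolean(0));
    if (json_object_object_get(opts, "reject_limit") == NULL)
        json_object_object_add(opts, "reject_limit", json_object_new_int(0));

    /* the returned string is owned by opts, so copy it before releasing */
    char buf[256];
    snprintf(buf, sizeof(buf), "%s", json_object_to_json_string(opts));
    json_object_put(opts);          /* drop the reference and free the object */

    printf("formatter options: %s\n", buf);
    return 0;
}
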
diff --git a/contrib/exthdfs/Makefile b/contrib/exthdfs/Makefile
index 355b74e..cffb139 100644
--- a/contrib/exthdfs/Makefile
+++ b/contrib/exthdfs/Makefile
@@ -1,36 +1,13 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
 MODULE_big = exthdfs
 OBJS       = exthdfs.o
 
-top_builddir = ../..
-include $(top_builddir)/src/Makefile.global
-
-PG_CPPFLAGS = -I$(libpq_srcdir)
-PG_LIBS = $(libpq_pgport)
-
-override CFLAGS += -lhdfs3 -lstorage -L$(prefix)/lib -I$(prefix)/include
-
 ifdef USE_PGXS
 PGXS := $(shell pg_config --pgxs)
 include $(PGXS)
 else
 subdir = contrib/exthdfs
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
 include $(top_srcdir)/contrib/contrib-global.mk
+
 endif
diff --git a/contrib/exthdfs/common.h b/contrib/exthdfs/common.h
index 0908649..50e811d 100644
--- a/contrib/exthdfs/common.h
+++ b/contrib/exthdfs/common.h
@@ -1,22 +1,3 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
 #ifndef _EXTHDFS_COMMON_H_
 #define _EXTHDFS_COMMON_H_
 
@@ -24,6 +5,7 @@
 #include "fmgr.h"
 #include "funcapi.h"
 #include "access/extprotocol.h"
+#include "access/fileam.h"
 #include "catalog/pg_proc.h"
 #include "catalog/pg_exttable.h"
 #include "utils/array.h"
@@ -34,4 +16,3 @@
 #include <fcntl.h>
 
 #endif  // _EXTHDFS_COMMON_H_
-
diff --git a/contrib/exthdfs/exthdfs.c b/contrib/exthdfs/exthdfs.c
index 09f60f2..4a531a2 100644
--- a/contrib/exthdfs/exthdfs.c
+++ b/contrib/exthdfs/exthdfs.c
@@ -1,33 +1,15 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-#include "postgres.h"
-
 #include "common.h"
 #include "access/extprotocol.h"
 #include "cdb/cdbdatalocality.h"
+#include "utils/uri.h"
+
+#include "storage/cwrapper/hdfs-file-system-c.h"
 #include "cdb/cdbfilesystemcredential.h"
 #include "cdb/cdbvars.h"
-#include "storage/cwrapper/hdfs-file-system-c.h"
-#include "utils/uri.h"
+#include "postgres.h"
 
-PG_MODULE_MAGIC;
+PG_MODULE_MAGIC;
 
 PG_FUNCTION_INFO_V1(hdfsprotocol_blocklocation);
 PG_FUNCTION_INFO_V1(hdfsprotocol_validate);
@@ -35,188 +17,238 @@ PG_FUNCTION_INFO_V1(hdfsprotocol_validate);
 Datum hdfsprotocol_blocklocation(PG_FUNCTION_ARGS);
 Datum hdfsprotocol_validate(PG_FUNCTION_ARGS);
 
+static char * getIpBySocket(const char * socket);
+#define EXPECTED_MAX_HDFS_CONNECTIONS 10
 Datum hdfsprotocol_blocklocation(PG_FUNCTION_ARGS)
 {
-	ExtProtocolBlockLocationData *bldata =
-		palloc0(sizeof(ExtProtocolBlockLocationData));
+	/* Build the result instance */
+	ExtProtocolBlockLocationData *bldata = palloc0(
+			sizeof(ExtProtocolBlockLocationData));
 	if (bldata == NULL)
 	{
 		elog(ERROR, "hdfsprotocol_blocklocation : "
-                    "cannot allocate due to no memory");
+		"cannot allocate due to no memory");
 	}
 	bldata->type = T_ExtProtocolBlockLocationData;
 	fcinfo->resultinfo = bldata;
 
-	ExtProtocolValidatorData *pvalidator_data = (ExtProtocolValidatorData *)
-												(fcinfo->context);
-
-
-	 // Parse URI of the first location, we expect all locations uses the same
-	 // name node server. This is checked in validation function.
+	ExtProtocolValidatorData *pvalidator_data =
+			(ExtProtocolValidatorData *) (fcinfo->context);
 
-	char *first_uri_str = (char *)strVal(lfirst(list_head(pvalidator_data->url_list)));
+	/*
+	 * Parse URI of the first location, we expect all locations uses the same
+	 * name node server. This is checked in validation function.
+	 */
+	char *first_uri_str = (char *) strVal(
+			lfirst(list_head(pvalidator_data->url_list)));
 	Uri *uri = ParseExternalTableUri(first_uri_str);
 
 	elog(DEBUG3, "hdfsprotocol_blocklocation : "
-				 "extracted HDFS name node address %s:%d",
-				 uri->hostname, uri->port);
+	"extracted HDFS name node address %s:%d",
+	uri->hostname, uri->port);
+
+	if (enable_secure_filesystem && Gp_role != GP_ROLE_EXECUTE)
+	{
+		char *ccname = NULL;
+		/*
+		 * refresh kerberos ticket
+		 */
+		if (!login())
+		{
+			errno = EACCES;
+		}
+		ccname = pstrdup(krb5_ccname);
+		SetCcname(ccname);
+		if (ccname)
+		  pfree(ccname);
+	}
 
-	// Create file system instance
+	/* Create file system instance */
 	FscHdfsFileSystemC *fs = FscHdfsNewFileSystem(uri->hostname, uri->port);
 	if (fs == NULL)
 	{
 		elog(ERROR, "hdfsprotocol_blocklocation : "
-					"failed to create HDFS instance to connect to %s:%d",
-					uri->hostname, uri->port);
+		"failed to create HDFS instance to connect to %s:%d",
+		uri->hostname, uri->port);
 	}
 
-	// Clean up uri instance as we don't need it any longer
-	FreeExternalTableUri(uri);
+	/* Clean up uri instance as we don't need it any longer */
+	pfree(uri);
 
-	// Check all locations to get files to fetch location.
+	/* Check all locations to get files to fetch location. */
 	ListCell *lc = NULL;
 	foreach(lc, pvalidator_data->url_list)
 	{
-		// Parse current location URI.
-		char *url = (char *)strVal(lfirst(lc));
+		/* Parse current location URI. */
+		char *url = (char *) strVal(lfirst(lc));
 		Uri *uri = ParseExternalTableUri(url);
 		if (uri == NULL)
 		{
 			elog(ERROR, "hdfsprotocol_blocklocation : "
-						"invalid URI encountered %s", url);
+			"invalid URI encountered %s", url);
 		}
 
-		 //
-		 // NOTICE: We temporarily support only directories as locations. We plan
-		 //        to extend the logic to specifying single file as one location
-		 //         very soon.
-
+		/*
+		 * NOTICE: We temporarily support only directories as locations. We plan
+		 *         to extend the logic to specifying single file as one location
+		 *         very soon.
+		 */
 
-		// get files contained in the path.
+		/* get files contained in the path. */
 		FscHdfsFileInfoArrayC *fiarray = FscHdfsDirPath(fs, uri->path);
 		if (FscHdfsHasErrorRaised(fs))
 		{
-		  Assert(fiarray == NULL);
-		  CatchedError *ce = FscHdfsGetFileSystemError(fs);
-		  elog(ERROR, "hdfsprotocol_blocklocation : "
-		       "failed to get files of path %s. %s (%d)",
-		       uri->path,
-		       ce->errMessage, ce->errCode);
+			Assert(fiarray == NULL);
+			CatchedError *ce = FscHdfsGetFileSystemError(fs);
+			elog(ERROR, "hdfsprotocol_blocklocation : "
+			"failed to get files of path %s. %s (%d)",
+			uri->path,
+			ce->errMessage, ce->errCode);
 		}
 
-		// Call block location api to get data location for each file
-		for (int i = 0 ; true ; i++)
+		/* Call block location api to get data location for each file */
+		for (int i = 0; true; i++)
 		{
-		  FscHdfsFileInfoC *fi = FscHdfsGetFileInfoFromArray(fiarray, i);
+			FscHdfsFileInfoC *fi = FscHdfsGetFileInfoFromArray(fiarray, i);
 
-			// break condition of this for loop
-			if (fi == NULL) {break;}
+			/* break condition of this for loop */
+			if (fi == NULL)
+			{
+				break;
+			}
 
-			// Build file name full path.
+			/* Build file name full path. */
 			const char *fname = FscHdfsGetFileInfoName(fi);
 			char *fullpath = palloc0(strlen(uri->path) + /* path  */
-			                         1 + /* slash */
-			                         strlen(fname) + /* name  */
-			                         1); /* \0    */
+			1 + /* slash */
+			strlen(fname) + /* name  */
+			1); /* \0    */
 			sprintf(fullpath, "%s/%s", uri->path, fname);
 
 			elog(DEBUG3, "hdfsprotocol_blocklocation : "
-						 "built full path file %s", fullpath);
+			"built full path file %s", fullpath);
 
-			// Get file full length.
+			/* Get file full length. */
 			int64_t len = FscHdfsGetFileInfoLength(fi);
 
 			elog(DEBUG3, "hdfsprotocol_blocklocation : "
-					     "got file %s length " INT64_FORMAT,
-					     fullpath, len);
+			"got file %s length " INT64_FORMAT,
+			fullpath, len);
 
-			if (len == 0) {
+			if (len == 0)
+			{
 				pfree(fullpath);
 				continue;
 			}
 
-			// Get block location data for this file
+			/* Get block location data for this file */
 			FscHdfsFileBlockLocationArrayC *bla =
-			    FscHdfsGetPathFileBlockLocation(fs, fullpath, 0, len);
+					FscHdfsGetPathFileBlockLocation(fs, fullpath, 0, len);
 			if (FscHdfsHasErrorRaised(fs))
 			{
-			  Assert(bla == NULL);
-			  CatchedError *ce = FscHdfsGetFileSystemError(fs);
-			  elog(ERROR, "hdfsprotocol_blocklocation : "
-			       "failed to get block location of path %s. %s (%d)"
-			       "It is reported generally due to HDFS service errors or "
-			       "another session's ongoing writing.",
-			       fullpath,
-			       ce->errMessage,
-			       ce->errCode);
+				Assert(bla == NULL);
+				CatchedError *ce = FscHdfsGetFileSystemError(fs);
+				elog(ERROR, "hdfsprotocol_blocklocation : "
+				"failed to get block location of path %s. %s (%d). "
+				"It is reported generally due to HDFS service errors or "
+				"another session's ongoing writing.",
+				fullpath, ce->errMessage, ce->errCode);
 			}
 
-			// Add file full path and its block number as result.
+			/* Add file full path and its block number as result. */
 			blocklocation_file *blf = palloc0(sizeof(blocklocation_file));
 			blf->file_uri = pstrdup(fullpath);
 			blf->block_num = FscHdfsGetFileBlockLocationArraySize(bla);
 			blf->locations = palloc0(sizeof(BlockLocation) * blf->block_num);
 
 			elog(DEBUG3, "hdfsprotocol_blocklocation : file %s has %d blocks",
-			     fullpath, blf->block_num);
+			fullpath, blf->block_num);
 
-			// We don't need it any longer
+			/* We don't need it any longer */
 			pfree(fullpath);
 
-			// Add block information as a list.
-			for (int bidx = 0 ; bidx < blf->block_num ; bidx++)
+			/* Add block information as a list. */
+			for (int bidx = 0; bidx < blf->block_num; bidx++)
 			{
-			  FscHdfsFileBlockLocationC *blo =
-			      FscHdfsGetFileBlockLocationFromArray(bla, bidx);
-			  BlockLocation *bl = &(blf->locations[bidx]);
-			  bl->numOfNodes = FscHdfsGetFileBlockLocationNNodes(blo);
-			  bl->hosts = (char **) palloc0(sizeof(char *) * bl->numOfNodes);
-			  bl->names = (char **) palloc0(sizeof(char *) * bl->numOfNodes);
-			  bl->topologyPaths = (char **) palloc0(
-			      sizeof(char *) * bl->numOfNodes);
-			  bl->offset = FscHdfsGetFileBlockLocationOffset(blo);
-			  bl->length = FscHdfsGetFileBlockLocationLength(blo);
-			  bl->corrupt = FscHdfsGetFileBlockLocationCorrupt(blo);
-
-			  for (int nidx = 0; nidx < bl->numOfNodes; nidx++)
-			  {
-			    bl->hosts[nidx] = pstrdup(
-			        FscHdfsGetFileBlockLocationNodeHost(blo, nidx));
-			    bl->names[nidx] = pstrdup(
-			        FscHdfsGetFileBlockLocationNodeName(blo, nidx));
-//          elog (LOG,"the host of bidx %d nidx %d is %s, name is %s.",bidx, nidx, bl->hosts[nidx], bl->names[nidx]);
-			    bl->topologyPaths[nidx] = pstrdup(
-			        FscHdfsGetFileBlockLocationNodeTopoPath(blo, nidx));
-			  }
+				FscHdfsFileBlockLocationC *blo =
+						FscHdfsGetFileBlockLocationFromArray(bla, bidx);
+				BlockLocation *bl = &(blf->locations[bidx]);
+				bl->numOfNodes = FscHdfsGetFileBlockLocationNNodes(blo);
+				bl->rangeId = -1;
+				bl->replicaGroupId = -1;
+				bl->hosts = (char **) palloc0(sizeof(char *) * bl->numOfNodes);
+				bl->names = (char **) palloc0(sizeof(char *) * bl->numOfNodes);
+				bl->topologyPaths = (char **) palloc0(
+						sizeof(char *) * bl->numOfNodes);
+				bl->offset = FscHdfsGetFileBlockLocationOffset(blo);
+				bl->length = FscHdfsGetFileBlockLocationLength(blo);
+				bl->corrupt = FscHdfsGetFileBlockLocationCorrupt(blo);
+
+				for (int nidx = 0; nidx < bl->numOfNodes; nidx++)
+				{
+					bl->hosts[nidx] = pstrdup(
+							FscHdfsGetFileBlockLocationNodeHost(blo, nidx));
+					bl->names[nidx] = pstrdup(
+							FscHdfsGetFileBlockLocationNodeName(blo, nidx));
+//					elog (LOG,"the host of bidx %d nidx %d is %s, name is %s.",bidx, nidx, bl->hosts[nidx], bl->names[nidx]);
+					bl->topologyPaths[nidx] = pstrdup(
+							FscHdfsGetFileBlockLocationNodeTopoPath(blo, nidx));
+				}
 			}
 
-			bldata->files = lappend(bldata->files, (void *)(blf));
+			bldata->files = lappend(bldata->files, (void *) (blf));
 
-			// Clean up block location instances created by the lib.
+			/* Clean up block location instances created by the lib. */
 			FscHdfsFreeFileBlockLocationArrayC(&bla);
 		}
 
 		/* Clean up URI instance in loop as we don't need it any longer */
-		FreeExternalTableUri(uri);
+		pfree(uri);
 
 		/* Clean up file info array created by the lib for this location. */
 		FscHdfsFreeFileInfoArrayC(&fiarray);
 	}
 
-	// destroy fs instance
+	/* destroy fs instance */
 	FscHdfsFreeFileSystemC(&fs);
-
-	PG_RETURN_VOID();
-
+	PG_RETURN_VOID();
 }
 
 Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 {
+  if(nodeTag(fcinfo->context) == T_ExtProtocolRenameData) {
+    if (enable_secure_filesystem && Gp_role != GP_ROLE_EXECUTE)
+    {
+      char *ccname = NULL;
+      /*
+       * refresh kerberos ticket
+       */
+      if (!login())
+      {
+        errno = EACCES;
+      }
+      ccname = pstrdup(krb5_ccname);
+      SetCcname(ccname);
+      if (ccname)
+        pfree(ccname);
+    }
+    ExtProtocolRenameData *renameData = (ExtProtocolRenameData *) fcinfo->context;
+    FscHdfsFileSystemC *fs = FscHdfsNewFileSystem(renameData->olduri->hostname,
+                                                  renameData->olduri->port);
+    if (fs == NULL) {
+        elog(ERROR, "RenamePathInHDFS : "
+        "failed to create HDFS instance to connect to %s:%d",
+        renameData->olduri->hostname, renameData->olduri->port);
+    }
+    FscHdfsRenamePath(fs, renameData->olduri->path, renameData->newduri->path);
+    FscHdfsFreeFileSystemC(&fs);
+    PG_RETURN_VOID();
+  }
 	elog(DEBUG3, "hdfsprotocol_validate() begin");
 
 	/* Check which action should perform. */
 	ExtProtocolValidatorData *pvalidator_data =
-       (ExtProtocolValidatorData *)(fcinfo->context);
+			(ExtProtocolValidatorData *) (fcinfo->context);
 
 	if (pvalidator_data->forceCreateDir)
 		Assert(pvalidator_data->url_list && pvalidator_data->url_list->length == 1);
@@ -227,20 +259,22 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 		if (list_length(pvalidator_data->url_list) != 1)
 		{
 			ereport(ERROR,
-					(errcode(ERRCODE_SYNTAX_ERROR),
-					 errmsg("hdfsprotocol_validate : "
-							"only one location url is supported for writable external hdfs")));
+					(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "only one location url is supported for writable external hdfs")));
 		}
 	}
+	char *first_uri_str = (char *) strVal(
+			lfirst(list_head(pvalidator_data->url_list)));
+	Uri *uri = ParseExternalTableUri(first_uri_str);
 
 	/* Go through first round to get formatter type */
 	bool isCsv = false;
 	bool isText = false;
 	bool isOrc = false;
 	ListCell *optcell = NULL;
+
 	foreach(optcell, pvalidator_data->format_opts)
 	{
-		DefElem *de = (DefElem *)lfirst(optcell);
+		DefElem *de = (DefElem *) lfirst(optcell);
 		if (strcasecmp(de->defname, "formatter") == 0)
 		{
 			char *val = strVal(de->arg);
@@ -258,20 +292,16 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 			}
 		}
 	}
-
 	if (!isCsv && !isText && !isOrc)
 	{
 		ereport(ERROR,
-				(errcode(ERRCODE_SYNTAX_ERROR),
-				 errmsg("hdfsprotocol_validate : "
-						"only 'csv', 'text' and 'orc' formatter is supported for external hdfs")));
-	}
-	Assert(isCsv || isText || isOrc);
+				(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "only 'csv', 'text' and 'orc' formatter is supported for external hdfs")));
+	}
+	Assert(isCsv || isText || isOrc);
 
 	/* Validate formatter options */
 	foreach(optcell, pvalidator_data->format_opts)
 	{
-		DefElem *de = (DefElem *)lfirst(optcell);
+		DefElem *de = (DefElem *) lfirst(optcell);
 		if (strcasecmp(de->defname, "delimiter") == 0)
 		{
 			char *val = strVal(de->arg);
@@ -279,38 +309,30 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 			if (strcasecmp(val, "off") == 0)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("hdfsprotocol_validate : "
-								"'off' value of 'delimiter' option is not supported")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'off' value of 'delimiter' option is not supported")));
 			}
 			/* Validation 2. Can specify multibytes characters */
 			if (strlen(val) < 1)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-								 errmsg("hdfsprotocol_validate : "
-										"'delimiter' option accepts multibytes characters")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'delimiter' option accepts multibytes characters")));
 			}
 		}
 
 		if (strcasecmp(de->defname, "escape") == 0)
 		{
 			char *val = strVal(de->arg);
-			/* Validation 3. User can not specify 'OFF' in delimiter */
-			if (strcasecmp(val, "off") == 0)
-			{
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("hdfsprotocol_validate : "
-								"'off' value of 'escape' option is not supported")));
-			}
+			/* Validation 3. User can not specify 'OFF' in escape except for TEXT format */
+			if (strcasecmp(val, "off") == 0 && !isText)
+			{
+				ereport(ERROR,
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'off' value of 'escape' option is not supported")));
+			}
 			/* Validation 4. Can only specify one character */
-			if (strlen(val) != 1)
+			if (strlen(val) != 1 && strcasecmp(val, "off") != 0)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-								 errmsg("hdfsprotocol_validate : "
-										"'escape' option accepts single character")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'escape' option accepts single character")));
 			}
 		}
 
@@ -318,15 +340,11 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 		{
 			char *val = strVal(de->arg);
 			/* Validation 5. only accept 'lf', 'cr', 'crlf' */
-			if (strcasecmp(val, "lf") != 0 &&
-				strcasecmp(val, "cr") != 0 &&
-				strcasecmp(val, "crlf") != 0)
+			if (strcasecmp(val, "lf") != 0 && strcasecmp(val, "cr") != 0
+					&& strcasecmp(val, "crlf") != 0)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("hdfsprotocol_validate : "
-								"the value of 'newline' option can only be "
-								"'lf', 'cr' or 'crlf'")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "the value of 'newline' option can only be " "'lf', 'cr' or 'crlf'")));
 			}
 		}
 
@@ -336,9 +354,7 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 			if (!isCsv)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-								 errmsg("hdfsprotocol_validate : "
-										"'quote' option is only available in 'csv' formatter")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'quote' option is only available in 'csv' formatter")));
 			}
 
 			char *val = strVal(de->arg);
@@ -346,9 +362,7 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 			if (strlen(val) != 1)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-								 errmsg("hdfsprotocol_validate : "
-										"'quote' option accepts single character")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'quote' option accepts single character")));
 			}
 		}
 
@@ -358,9 +372,7 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 			if (!isCsv)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-								 errmsg("hdfsprotocol_validate : "
-										"'force_notnull' option is only available in 'csv' formatter")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'force_notnull' option is only available in 'csv' formatter")));
 			}
 		}
 
@@ -370,9 +382,7 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 			if (!isCsv)
 			{
 				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-								 errmsg("hdfsprotocol_validate : "
-										"'force_quote' option is only available in 'csv' formatter")));
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("hdfsprotocol_validate : " "'force_quote' option is only available in 'csv' formatter")));
 			}
 		}
 	}
@@ -388,20 +398,25 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 	foreach(lc, pvalidator_data->url_list)
 	{
 		/* Parse current location URI. */
-		char *url = (char *)strVal(lfirst(lc));
+		char *url = (char *) strVal(lfirst(lc));
 		Uri *uri = ParseExternalTableUri(url);
 		if (uri == NULL)
 		{
 			elog(ERROR, "hdfsprotocol_validate : "
-						"invalid URI encountered %s", url);
+			"invalid URI encountered %s", url);
 		}
 
 		if (uri->protocol != URI_HDFS)
 		{
 			elog(ERROR, "hdfsprotocol_validate : "
-						"invalid URI protocol encountered in %s, "
-						"hdfs:// protocol is required",
-						url);
+			"invalid URI protocol encountered in %s, "
+			"hdfs:// protocol is required",
+			url);
+		}
+
+		if (uri->path[1] == '/')
+		{
+			elog(ERROR, "hdfsprotocol_validate : "
+			"invalid files path in %s", uri->path);
 		}
 
 		if (nnaddr == NULL)
@@ -414,23 +429,39 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 			if (strcmp(nnaddr, uri->hostname) != 0)
 			{
 				elog(ERROR, "hdfsprotocol_validate : "
-							"different name server addresses are detected, "
-							"both %s and %s are found",
-							nnaddr, uri->hostname);
+				"different name server addresses are detected, "
+				"both %s and %s are found",
+				nnaddr, uri->hostname);
 			}
 			if (nnport != uri->port)
 			{
 				elog(ERROR, "hdfsprotocol_validate : "
-							"different name server ports are detected, "
-							"both %d and %d are found",
-							nnport, uri->port);
+				"different name server ports are detected, "
+				"both %d and %d are found",
+				nnport, uri->port);
 			}
 		}
 
 		/* SHOULD ADD LOGIC HERE TO CREATE UNEXISTING PATH */
 		if (pvalidator_data->forceCreateDir)
 		{
-		  elog(LOG, "hdfs_validator() forced creating dir");
+
+			elog(LOG, "hdfs_validator() forced creating dir");
+			if (enable_secure_filesystem && Gp_role != GP_ROLE_EXECUTE)
+			{
+				char *ccname = NULL;
+				/*
+				 * refresh kerberos ticket
+				 */
+				if (!login())
+				{
+					errno = EACCES;
+				}
+				ccname = pstrdup(krb5_ccname);
+				SetCcname(ccname);
+				if (ccname)
+				  pfree(ccname);
+			}
 
 			/* Create file system instance */
 			FscHdfsFileSystemC *fs = FscHdfsNewFileSystem(uri->hostname,
@@ -468,7 +499,7 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 		}
 
 		/* Clean up temporarily created instances */
-		FreeExternalTableUri(uri);
+		pfree(uri);
 		if (nnaddr != NULL)
 		{
 			pfree(nnaddr);
@@ -482,6 +513,17 @@ Datum hdfsprotocol_validate(PG_FUNCTION_ARGS)
 	 * be moved to call formatter specific validation UDFs.
 	 **************************************************************************/
 
-	PG_RETURN_VOID();
+	PG_RETURN_VOID();
+}
+
+static char * getIpBySocket(const char * socket) {
+  if (socket == NULL) {
+    return NULL;
+  }
+  int len = 0;
+  while (socket[len] != ':' && socket[len] != '\0') {
+    len++;
+  }
+  return pnstrdup(socket,len);
 }
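
The new static helper getIpBySocket() at the bottom of exthdfs.c is declared and defined but not referenced yet in this patch; its contract is simply to return the host part of a "host:port" datanode socket string. A standalone sketch of the same behaviour (editorial illustration, not part of the patch; renamed ipFromSocket to avoid confusion, with plain malloc in place of the palloc-based pnstrdup used in the backend):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* return a newly allocated copy of everything before the first ':' */
static char *ipFromSocket(const char *socket)
{
    if (socket == NULL)
        return NULL;
    size_t len = 0;
    while (socket[len] != ':' && socket[len] != '\0')
        len++;
    char *ip = malloc(len + 1);
    if (ip == NULL)
        return NULL;
    memcpy(ip, socket, len);
    ip[len] = '\0';
    return ip;
}

int main(void)
{
    char *ip = ipFromSocket("192.168.10.21:50010");
    printf("%s\n", ip);   /* prints 192.168.10.21 */
    free(ip);
    return 0;
}
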
 
diff --git a/contrib/exthive/Makefile b/contrib/exthive/Makefile
new file mode 100644
index 0000000..012d3e3
--- /dev/null
+++ b/contrib/exthive/Makefile
@@ -0,0 +1,13 @@
+MODULE_big = exthive
+OBJS       = exthive.o
+
+ifdef USE_PGXS
+PGXS := $(shell pg_config --pgxs)
+include $(PGXS)
+else
+subdir = contrib/exthive
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+
+endif
diff --git a/contrib/exthive/common.h b/contrib/exthive/common.h
new file mode 100644
index 0000000..6af866f
--- /dev/null
+++ b/contrib/exthive/common.h
@@ -0,0 +1,18 @@
+#ifndef _EXTHIVE_COMMON_H_
+#define _EXTHIVE_COMMON_H_
+
+#include "postgres.h"
+#include "fmgr.h"
+#include "funcapi.h"
+#include "access/extprotocol.h"
+#include "access/fileam.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_exttable.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/memutils.h"
+#include "miscadmin.h"
+
+#include <fcntl.h>
+
+#endif  // _EXTHIVE_COMMON_H_
diff --git a/contrib/exthive/exthive.c b/contrib/exthive/exthive.c
new file mode 100644
index 0000000..8b75a84
--- /dev/null
+++ b/contrib/exthive/exthive.c
@@ -0,0 +1,493 @@
+#include "c.h"
+#include "cdb/cdbdatalocality.h"
+#include "cdb/cdbfilesystemcredential.h"
+#include "cdb/cdbvars.h"
+#include "common.h"
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "funcapi.h"
+#include "storage/cwrapper/hdfs-file-system-c.h"
+#include "storage/cwrapper/hive-file-system-c.h"
+#include "utils/uri.h"
+
+PG_FUNCTION_INFO_V1(hiveprotocol_validate);
+PG_FUNCTION_INFO_V1(hiveprotocol_blocklocation);
+
+PG_MODULE_MAGIC;
+
+Datum hiveprotocol_validate(PG_FUNCTION_ARGS);
+Datum hiveprotocol_blocklocation(PG_FUNCTION_ARGS);
+
+Datum hiveprotocol_blocklocation(PG_FUNCTION_ARGS) {
+  /* Build the result instance */
+  ExtProtocolBlockLocationData *bldata =
+      palloc0(sizeof(ExtProtocolBlockLocationData));
+  if (bldata == NULL) {
+    elog(ERROR,
+         "hiveprotocol_blocklocation : "
+         "cannot allocate due to no memory");
+  }
+  bldata->type = T_ExtProtocolBlockLocationData;
+  fcinfo->resultinfo = bldata;
+
+  ExtProtocolValidatorData *pvalidator_data =
+      (ExtProtocolValidatorData *)(fcinfo->context);
+
+  /*
+   * Parse URI of the first location, we expect all locations uses the same
+   * name node server. This is checked in validation function.
+   */
+  char *first_uri_str =
+      (char *)strVal(lfirst(list_head(pvalidator_data->url_list)));
+  Uri *uri = ParseExternalTableUri(first_uri_str);
+
+  elog(DEBUG3,
+       "hiveprotocol_blocklocation : "
+       "extracted HDFS name node address %s:%d where store hive table",
+       uri->hostname, uri->port);
+
+  /* Create file system instance */
+  FscHdfsFileSystemC *fs = FscHdfsNewFileSystem(uri->hostname, uri->port);
+  if (fs == NULL) {
+    elog(ERROR,
+         "hiveprotocol_blocklocation : "
+         "failed to create HIVE instance to connect to %s:%d",
+         uri->hostname, uri->port);
+  }
+
+  /* Clean up uri instance as we don't need it any longer */
+  pfree(uri);
+
+  /* Check all locations to get files to fetch location. */
+  ListCell *lc = NULL;
+  foreach (lc, pvalidator_data->url_list) {
+    /* Parse current location URI. */
+    char *url = (char *)strVal(lfirst(lc));
+    Uri *uri = ParseExternalTableUri(url);
+    if (uri == NULL) {
+      elog(ERROR,
+           "hiveprotocol_blocklocation : "
+           "invalid URI encountered %s",
+           url);
+    }
+
+    /*
+     * NOTICE: We temporarily support only directories as locations. We plan
+     *         to extend the logic to specifying single file as one location
+     *         very soon.
+     */
+
+    /* get files contained in the path. */
+    FscHdfsFileInfoArrayC *fiarray = FscHdfsDirPath(fs, uri->path);
+    if (FscHdfsHasErrorRaised(fs)) {
+      Assert(fiarray == NULL);
+      CatchedError *ce = FscHdfsGetFileSystemError(fs);
+      elog(ERROR,
+           "hdfsprotocol_blocklocation : "
+           "failed to get files of path %s. %s (%d)",
+           uri->path, ce->errMessage, ce->errCode);
+    }
+
+    /* Call block location api to get data location for each file */
+    for (int i = 0; true; i++) {
+      FscHdfsFileInfoC *fi = FscHdfsGetFileInfoFromArray(fiarray, i);
+
+      /* break condition of this for loop */
+      if (fi == NULL) {
+        break;
+      }
+
+      /* Build file name full path. */
+      const char *fname = FscHdfsGetFileInfoName(fi);
+      char *fullpath = palloc0(strlen(uri->path) + /* path  */
+                               1 +                 /* slash */
+                               strlen(fname) +     /* name  */
+                               1);                 /* \0    */
+      sprintf(fullpath, "%s/%s", uri->path, fname);
+
+      elog(DEBUG3,
+           "hiveprotocol_blocklocation : "
+           "built full path file %s",
+           fullpath);
+
+      /* Get file full length. */
+      int64_t len = FscHdfsGetFileInfoLength(fi);
+
+      elog(DEBUG3,
+           "hiveprotocol_blocklocation : "
+           "got file %s length " INT64_FORMAT,
+           fullpath, len);
+
+      if (len == 0) {
+        pfree(fullpath);
+        continue;
+      }
+
+      /* Get block location data for this file */
+      FscHdfsFileBlockLocationArrayC *bla =
+          FscHdfsGetPathFileBlockLocation(fs, fullpath, 0, len);
+      if (FscHdfsHasErrorRaised(fs)) {
+        Assert(bla == NULL);
+        CatchedError *ce = FscHdfsGetFileSystemError(fs);
+        elog(ERROR,
+             "hiveprotocol_blocklocation : "
+             "failed to get block location of path %s. %s (%d). "
+             "It is reported generally due to HDFS service errors or "
+             "another session's ongoing writing.",
+             fullpath, ce->errMessage, ce->errCode);
+      }
+
+      /* Add file full path and its block number as result. */
+      blocklocation_file *blf = palloc0(sizeof(blocklocation_file));
+      blf->file_uri = pstrdup(fullpath);
+      blf->block_num = FscHdfsGetFileBlockLocationArraySize(bla);
+      blf->locations = palloc0(sizeof(BlockLocation) * blf->block_num);
+
+      elog(DEBUG3, "hiveprotocol_blocklocation : file %s has %d blocks",
+           fullpath, blf->block_num);
+
+      /* We don't need it any longer */
+      pfree(fullpath);
+
+      /* Add block information as a list. */
+      for (int bidx = 0; bidx < blf->block_num; bidx++) {
+        FscHdfsFileBlockLocationC *blo =
+            FscHdfsGetFileBlockLocationFromArray(bla, bidx);
+        BlockLocation *bl = &(blf->locations[bidx]);
+        bl->numOfNodes = FscHdfsGetFileBlockLocationNNodes(blo);
+        bl->rangeId = -1;
+        bl->replicaGroupId = -1;
+        bl->hosts = (char **)palloc0(sizeof(char *) * bl->numOfNodes);
+        bl->names = (char **)palloc0(sizeof(char *) * bl->numOfNodes);
+        bl->topologyPaths = (char **)palloc0(sizeof(char *) * bl->numOfNodes);
+        bl->offset = FscHdfsGetFileBlockLocationOffset(blo);
+        bl->length = FscHdfsGetFileBlockLocationLength(blo);
+        bl->corrupt = FscHdfsGetFileBlockLocationCorrupt(blo);
+
+        for (int nidx = 0; nidx < bl->numOfNodes; nidx++) {
+          bl->hosts[nidx] =
+              pstrdup(FscHdfsGetFileBlockLocationNodeHost(blo, nidx));
+          bl->names[nidx] =
+              pstrdup(FscHdfsGetFileBlockLocationNodeName(blo, nidx));
+          bl->topologyPaths[nidx] =
+              pstrdup(FscHdfsGetFileBlockLocationNodeTopoPath(blo, nidx));
+        }
+      }
+
+      bldata->files = lappend(bldata->files, (void *)(blf));
+
+      /* Clean up block location instances created by the lib. */
+      FscHdfsFreeFileBlockLocationArrayC(&bla);
+    }
+
+    /* Clean up URI instance in loop as we don't need it any longer */
+    pfree(uri);
+
+    /* Clean up file info array created by the lib for this location. */
+    FscHdfsFreeFileInfoArrayC(&fiarray);
+  }
+
+  /* destroy fs instance */
+  FscHdfsFreeFileSystemC(&fs);
+  PG_RETURN_VOID();
+}
+
+Datum hiveprotocol_validate(PG_FUNCTION_ARGS) {
+  elog(DEBUG3, "hiveprotocol_validate() begin");
+
+  /* Check which action should perform. */
+  ExtProtocolValidatorData *pvalidator_data =
+      (ExtProtocolValidatorData *)(fcinfo->context);
+
+  statusHiveC status;
+
+  if (pvalidator_data->forceCreateDir)
+    Assert(pvalidator_data->url_list && pvalidator_data->url_list->length == 1);
+
+  if (pvalidator_data->direction == EXT_VALIDATE_WRITE) {
+    /* accept only one directory location */
+    if (list_length(pvalidator_data->url_list) != 1) {
+      ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+                      errmsg("hiveprotocol_validate : "
+                             "only one location url is supported for writable "
+                             "external hive")));
+    }
+  }
+
+  /* Go through first round to get formatter type */
+  bool isCsv = false;
+  bool isText = false;
+  bool isOrc = false;
+  ListCell *optcell = NULL;
+  foreach (optcell, pvalidator_data->format_opts) {
+    DefElem *de = (DefElem *)lfirst(optcell);
+    if (strcasecmp(de->defname, "formatter") == 0) {
+      char *val = strVal(de->arg);
+      if (strcasecmp(val, "csv") == 0) {
+        isCsv = true;
+      } else if (strcasecmp(val, "text") == 0) {
+        isText = true;
+      } else if (strcasecmp(val, "orc") == 0) {
+        isOrc = true;
+      }
+    }
+  }
+  if (!isCsv && !isText && !isOrc) {
+    ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+                    errmsg("hiveprotocol_validate : "
+                           "only 'csv', 'text' and 'orc' formatter is "
+                           "supported for external hive")));
+  }
+  Assert(isCsv || isText || isOrc);
+
+  /* Validate formatter options */
+  foreach (optcell, pvalidator_data->format_opts) {
+    DefElem *de = (DefElem *)lfirst(optcell);
+    if (strcasecmp(de->defname, "delimiter") == 0) {
+      char *val = strVal(de->arg);
+      /* Validation 1. User can not specify 'OFF' in delimiter */
+      if (strcasecmp(val, "off") == 0) {
+        ereport(ERROR,
+                (errcode(ERRCODE_SYNTAX_ERROR),
+                 errmsg("hiveprotocol_validate : "
+                        "'off' value of 'delimiter' option is not supported")));
+      }
+      /* Validation 2. Can specify multibytes characters */
+      if (strlen(val) < 1) {
+        ereport(ERROR,
+                (errcode(ERRCODE_SYNTAX_ERROR),
+                 errmsg("hiveprotocol_validate : "
+                        "'delimiter' option accepts multibytes characters")));
+      }
+    }
+
+    if (strcasecmp(de->defname, "escape") == 0) {
+      char *val = strVal(de->arg);
+      /* Validation 3. User can not specify 'OFF' in escape except for TEXT
+       * format */
+      if (strcasecmp(val, "off") == 0 && !isText) {
+        ereport(ERROR,
+                (errcode(ERRCODE_SYNTAX_ERROR),
+                 errmsg("hiveprotocol_validate : "
+                        "'off' value of 'escape' option is not supported")));
+      }
+      /* Validation 4. Can only specify one character */
+      if (strlen(val) != 1 && strcasecmp(val, "off") != 0) {
+        ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("hiveprotocol_validate : "
+                               "'escape' option accepts single character")));
+      }
+    }
+
+    if (strcasecmp(de->defname, "newline") == 0) {
+      char *val = strVal(de->arg);
+      /* Validation 5. only accept 'lf', 'cr', 'crlf' */
+      if (strcasecmp(val, "lf") != 0 && strcasecmp(val, "cr") != 0 &&
+          strcasecmp(val, "crlf") != 0) {
+        ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("hiveprotocol_validate : "
+                               "the value of 'newline' option can only be "
+                               "'lf', 'cr' or 'crlf'")));
+      }
+    }
+
+    if (strcasecmp(de->defname, "quote") == 0) {
+      /* This is allowed only for csv mode formatter */
+      if (!isCsv) {
+        ereport(
+            ERROR,
+            (errcode(ERRCODE_SYNTAX_ERROR),
+             errmsg("hiveprotocol_validate : "
+                    "'quote' option is only available in 'csv' formatter")));
+      }
+
+      char *val = strVal(de->arg);
+      /* Validation 5. Can only specify one character */
+      if (strlen(val) != 1) {
+        ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("hiveprotocol_validate : "
+                               "'quote' option accepts single character")));
+      }
+    }
+
+    if (strcasecmp(de->defname, "force_notnull") == 0) {
+      /* This is allowed only for csv mode formatter */
+      if (!isCsv) {
+        ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("hiveprotocol_validate : "
+                               "'force_notnull' option is only available in "
+                               "'csv' formatter")));
+      }
+    }
+
+    if (strcasecmp(de->defname, "force_quote") == 0) {
+      /* This is allowed only for csv mode formatter */
+      if (!isCsv) {
+        ereport(
+            ERROR,
+            (errcode(ERRCODE_SYNTAX_ERROR),
+             errmsg(
+                 "hiveprotocol_validate : "
+                 "'force_quote' option is only available in 'csv' formatter")));
+      }
+    }
+  }
+
+  /* All urls should
+   * 1) have the same protocol name 'hdfs',
+   * 2) the same hdfs namenode server address
+   */
+  /* Check all locations to get files to fetch location. */
+  char *nnaddr = NULL;
+  int nnport = -1;
+  ListCell *lc = NULL;
+  foreach (lc, pvalidator_data->url_list) {
+    /* Parse current location URI. */
+    char *url = (char *)strVal(lfirst(lc));
+    Uri *uri = ParseExternalTableUri(url);
+    if (uri == NULL) {
+      elog(ERROR,
+           "hiveprotocol_validate : "
+           "invalid URI encountered %s",
+           url);
+    }
+
+    /*get hdfs path of hive table by dbname and tablename*/
+    uint32_t pathLen = strlen(uri->path);
+    char dbname[pathLen];
+    memset(dbname, 0, pathLen);
+    char tblname[pathLen];
+    memset(tblname, 0, pathLen);
+    statusHiveC status;
+
+    if (sscanf(uri->path, "/%[^/]/%s", dbname, tblname) != 2) {
+      elog(ERROR, "incorrect url format, it should be /databasename/tablename");
+    }
+
+    char *hiveUrl = NULL;
+    getHiveDataDirectoryC(uri->hostname, uri->port, dbname, tblname, &hiveUrl, &status);
+    /*check whether the url is right and the table exist*/
+    if (status.errorCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+      FreeExternalTableUri(uri);
+      elog(ERROR,
+           "hiveprotocol_validate : "
+           "failed to get table info, %s ",
+           status.errorMessage);
+    }
+    FreeExternalTableUri(uri);
+
+    Uri *hiveUri = ParseExternalTableUri(hiveUrl);
+    if (hiveUri == NULL) {
+      elog(ERROR,
+           "hiveprotocol_validate : "
+           "invalid URI encountered %s",
+           url);
+    }
+
+    if (pvalidator_data->direction == EXT_VALIDATE_WRITE) {
+      elog(WARNING, "recommend creating a readable external table, or make sure "
+          "the user has write permission on the file");
+    }
+
+    if (hiveUri->protocol != URI_HDFS) {
+      elog(ERROR,
+           "hiveprotocol_validate : "
+           "invalid URI protocol encountered in %s, "
+           "hdfs:// protocol is required",
+           url);
+    }
+
+    if (hiveUri->path[1] == '/') {
+      elog(ERROR,
+           "hiveprotocol_validate : "
+           "invalid files path in %s",
+           hiveUri->path);
+    }
+
+    if (nnaddr == NULL) {
+      nnaddr = pstrdup(hiveUri->hostname);
+      nnport = hiveUri->port;
+    } else {
+      if (strcmp(nnaddr, hiveUri->hostname) != 0) {
+        elog(ERROR,
+             "hiveprotocol_validate : "
+             "different name server addresses are detected, "
+             "both %s and %s are found",
+             nnaddr, hiveUri->hostname);
+      }
+      if (nnport != hiveUri->port) {
+        elog(ERROR,
+             "hiveprotocol_validate : "
+             "different name server ports are detected, "
+             "both %d and %d are found",
+             nnport, hiveUri->port);
+      }
+    }
+
+    /* SHOULD ADD LOGIC HERE TO CREATE UNEXISTING PATH */
+    if (pvalidator_data->forceCreateDir) {
+      elog(LOG, "hive_validator() forced creating dir");
+      if (enable_secure_filesystem && Gp_role != GP_ROLE_EXECUTE) {
+        char *ccname = NULL;
+        /*
+         * refresh kerberos ticket
+         */
+        if (!login()) {
+          errno = EACCES;
+        }
+        ccname = pstrdup(krb5_ccname);
+        SetCcname(ccname);
+        if (ccname) pfree(ccname);
+      }
+
+      /* Create file system instance */
+      FscHdfsFileSystemC *fs =
+          FscHdfsNewFileSystem(hiveUri->hostname, hiveUri->port);
+      if (fs == NULL) {
+        elog(ERROR,
+             "hiveprotocol_validate : "
+             "failed to create HDFS instance to connect to %s:%d",
+             hiveUri->hostname, hiveUri->port);
+      }
+
+      if (FscHdfsExistPath(fs, hiveUri->path) &&
+          FscHdfsGetFileKind(fs, hiveUri->path) == 'F')
+        elog(ERROR,
+             "hdfsprotocol_validate : "
+             "Location \"%s\" is a file, not supported yet. "
+             "Only support directory now",
+             hiveUri->path);
+      if (pvalidator_data->direction == EXT_VALIDATE_WRITE &&
+          !FscHdfsExistInsertPath(fs, hiveUri->path)) {
+        elog(LOG, "hive_validator() to create url %s", hiveUri->path);
+        FscHdfsCreateInsertDir(fs, hiveUri->path);
+        if (FscHdfsHasErrorRaised(fs)) {
+          CatchedError *ce = FscHdfsGetFileSystemError(fs);
+          elog(ERROR,
+               "hiveprotocol_validate : "
+               "failed to create directory %s : %s(%d)",
+               hiveUri->path, ce->errMessage, ce->errCode);
+        }
+      }
+
+      /* destroy fs instance */
+      FscHdfsFreeFileSystemC(&fs);
+    }
+
+    /* Clean up temporarily created instances */
+    FreeExternalTableUri(hiveUri);
+  }
+
+  /* nnaddr is compared across all URLs, so free it only after the loop to
+   * avoid dereferencing freed memory on later iterations. */
+  if (nnaddr != NULL) {
+    pfree(nnaddr);
+  }
+
+  elog(LOG, "passed validating hive protocol options");
+
+  /**************************************************************************
+   * NOTE: checking formatter options here is a stopgap; it should be moved
+   * into formatter-specific validation UDFs.
+   **************************************************************************/
+
+  PG_RETURN_VOID();
+}
diff --git a/contrib/extprotocol/gpextprotocol.c b/contrib/extprotocol/gpextprotocol.c
index 07f1731..419b923 100644
--- a/contrib/extprotocol/gpextprotocol.c
+++ b/contrib/extprotocol/gpextprotocol.c
@@ -324,5 +324,5 @@ void FreeDemoUri(DemoUri *uri)
 	if (uri->protocol)
 		pfree(uri->protocol);
 	
-	FreeExternalTableUri(uri);
+	pfree(uri);
 }
diff --git a/contrib/formatter_fixedwidth/fixedwidth.c b/contrib/formatter_fixedwidth/fixedwidth.c
index 35742fb..2f17ff8 100644
--- a/contrib/formatter_fixedwidth/fixedwidth.c
+++ b/contrib/formatter_fixedwidth/fixedwidth.c
@@ -409,7 +409,7 @@ validate_format_params(FormatConfig *format_in_config, TupleDesc tupdesc)
 				break;
 			}			
 		}
-		
+
 		if (is_in_both_lists == false)
 		{
 			ereport(ERROR,
@@ -701,7 +701,7 @@ fixedwidth_in(PG_FUNCTION_ARGS)
 	tupdesc = FORMATTER_GET_TUPDESC(fcinfo);
 	
 	/* Get our internal description of the formatter */
-	ncolumns = tupdesc->natts;	
+	ncolumns = tupdesc->natts;
 	myData = (format_t *) FORMATTER_GET_USER_CTX(fcinfo);
 	
 	if (myData == NULL)
diff --git a/contrib/hawq-ambari-plugin/README.md b/contrib/hawq-ambari-plugin/README.md
index 11770b4..5aac4e0 100644
--- a/contrib/hawq-ambari-plugin/README.md
+++ b/contrib/hawq-ambari-plugin/README.md
@@ -57,7 +57,7 @@ Properties specified in the [build.properties](build.properties) file:
 To build the rpm for hawq-ambari-plugin, change the [build.properties](build.properties) file with the required parameters and run ```mvn install``` command under hawq-ambari-plugin directory:
 ```
 $ pwd
-hawq/contrib/hawq-ambari-plugin
+incubator-hawq/contrib/hawq-ambari-plugin
 $ mvn clean resources:copy-resources rpm:rpm -Dbuild_number=1
 ```
 
@@ -95,4 +95,4 @@ $ ./add-hawq.py --user <ambari-username> --password <ambari-password> --stack HD
 **Please restart ambari-server after running the script so that the changes take effect:**
 ```
 $ ambari-server restart
-```
+```
\ No newline at end of file
diff --git a/contrib/hawq-ambari-plugin/build.properties b/contrib/hawq-ambari-plugin/build.properties
index 3e35d22..808b6b4 100644
--- a/contrib/hawq-ambari-plugin/build.properties
+++ b/contrib/hawq-ambari-plugin/build.properties
@@ -1,8 +1,8 @@
-hawq.release.version=2.5.0
+hawq.release.version=4.0.0.0
 hawq.common.services.version=2.0.0
 pxf.release.version=3.2.1
 pxf.common.services.version=3.0.0
 hawq.repo.prefix=hawq
 hawq.addons.repo.prefix=hawq-add-ons
-repository.version=2.5.0.0
+repository.version=4.0.0.0
 default.stack=HDP-2.5
diff --git a/contrib/hawq-ambari-plugin/pom.xml b/contrib/hawq-ambari-plugin/pom.xml
index 0f0f390..5dfe393 100644
--- a/contrib/hawq-ambari-plugin/pom.xml
+++ b/contrib/hawq-ambari-plugin/pom.xml
@@ -20,9 +20,9 @@
 
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hawq</groupId>
-  <artifactId>hawq-ambari-plugin</artifactId>
-  <version>2.4.0.0</version>
-  <name>hawq-ambari-plugin</name>
+  <artifactId>oushu-database-ambari-plugin</artifactId>
+  <version>4.0.0.0</version>
+  <name>oushu-database-ambari-plugin</name>
   <url>http://maven.apache.org</url>
 
   <properties>
diff --git a/contrib/hawq-ambari-plugin/src/main/resources/utils/add-hawq.py b/contrib/hawq-ambari-plugin/src/main/resources/utils/add-hawq.py
index 4ae07a7..8721ec2 100755
--- a/contrib/hawq-ambari-plugin/src/main/resources/utils/add-hawq.py
+++ b/contrib/hawq-ambari-plugin/src/main/resources/utils/add-hawq.py
@@ -30,7 +30,7 @@ from optparse import OptionParser
 
 PLUGIN_VERSION = '${release}'
 DEFAULT_STACK = '${default.stack}'
-SUPPORTED_OS_LIST = ['redhat6', 'redhat7']
+SUPPORTED_OS_LIST = ['redhat7']
 HAWQ_LIB_STAGING_DIR = '${hawq.lib.staging.dir}'
 REPO_VERSION = '${repository.version}'
 HAWQ_REPO = '${hawq.repo.prefix}'
@@ -103,7 +103,7 @@ class APIClient:
     Returns stack information (stack name, stack version, repository version) of stack installed on cluster
     """
     _, response_json = self.__request('GET',
-                                      '/clusters/{0}/stack_versions?ClusterStackVersions/state.matches(CURRENT)'.format(
+                                          '/clusters/{0}/stack_versions'.format(
                                           cluster_name))
     if 'items' not in response_json or len(response_json['items']) == 0:
       raise Exception('No Stack found to be installed on the cluster {0}'.format(cluster_name))
diff --git a/contrib/hawq-docker/Makefile b/contrib/hawq-docker/Makefile
index 6895658..120ebe2 100644
--- a/contrib/hawq-docker/Makefile
+++ b/contrib/hawq-docker/Makefile
@@ -25,14 +25,11 @@ OS_VERSION := centos7
 # Do not use underscore "_" in CLUSTER_ID
 CLUSTER_ID := $(OS_VERSION)
 # Mount this local directory to /data in data container and share with other containers
-LOCAL :=
+LOCAL := 
 # networks used in docker
 NETWORK := $(CLUSTER_ID)_hawq_network
-HAWQ_HOME := "/data/hawq-devel"
-PXF_CLASSPATH_TEMPLATE = "hdp"
-JAVA_TOOL_OPTIONS := -Dfile.encoding=UTF8
 
-all:
+all: 
 	@echo " Usage:"
 	@echo "    To setup a build and test environment:         make run"
 	@echo "    To start all containers:                       make start"
@@ -40,21 +37,10 @@ all:
 	@echo "    To remove hdfs containers:                     make clean"
 	@echo "    To remove all containers:                      make distclean"
 	@echo ""
-	@echo "    To build images locally:                       make build-image"
+	@echo "    To build images locally:                       make build"
 	@echo "    To pull latest images:                         make pull"
-	@echo ""
-	@echo "    To build Hawq runtime:                         make build-hawq"
-	@echo "    To initialize Hawq on Namenode:                make init-hawq"
-	@echo "    To start Hawq on Namenode:                     make start-hawq"
-	@echo "    To stop Hawq on Namenode:                      make stop-hawq"
-	@echo "    To check Hawq status on Namenode:              make status-hawq"
-	@echo "    To build PXF runtime:                          make build-pxf"
-	@echo "    To initialize PXF on Namenode/Datanodes:       make init-pxf"
-	@echo "    To start PXF on Namenode/Datanodes:            make start-pxf"
-	@echo "    To stop PXF on on Namenode/Datanodes:          make stop-hawq"
-	@echo "    To check PXF status on Namenode/Datanodes:     make status-pxf"
 
-build-image:
+build:
 	@make -f $(THIS_MAKEFILE_PATH) build-hawq-dev-$(OS_VERSION)
 	@make -f $(THIS_MAKEFILE_PATH) build-hawq-test-$(OS_VERSION)
 	@echo "Build Images Done!"
@@ -65,10 +51,7 @@ build-hawq-dev-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/Dockerfil
 
 build-hawq-test-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/Dockerfile
 	@echo build hawq-test:$(OS_VERSION) image
-	docker build \
-		--build-arg=PXF_CLASSPATH_TEMPLATE="`cat ../../pxf/pxf-service/src/configs/templates/pxf-private-${PXF_CLASSPATH_TEMPLATE}.classpath.template`" \
-		--build-arg=PXF_LOG4J_PROPERTIES="`cat ../../pxf/pxf-service/src/main/resources/pxf-log4j.properties`" \
-		-t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/
+	docker build -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/
 
 create-data-container:
 	@echo create ${CLUSTER_ID}-data container
@@ -220,15 +203,8 @@ remove-data:
 
 pull:
 	@echo latest images
-	#There is no hawq repo in docker.io currently, we just build up a local repo to mimic the docker registry here.
-	#For remote registry.
-	#docker pull hawq/hawq-dev:$(OS_VERSION)
-	#docker pull hawq/hawq-test:$(OS_VERSION)
-	#For local registry, user need to install local registry and push images before following steps.
-	docker pull localhost:5000/hawq-dev:$(OS_VERSION)
-	docker pull localhost:5000/hawq-test:$(OS_VERSION)
-	docker tag localhost:5000/hawq-dev:$(OS_VERSION) hawq/hawq-dev:$(OS_VERSION)
-	docker tag localhost:5000/hawq-test:$(OS_VERSION) hawq/hawq-test:$(OS_VERSION)
+	docker pull hawq/hawq-dev:$(OS_VERSION)
+	docker pull hawq/hawq-test:$(OS_VERSION)
 
 clean:
 	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
@@ -244,218 +220,3 @@ distclean:
 		docker network rm $(NETWORK) 2>&1 >/dev/null || true; \
 	fi
 	@echo "Distclean Done!"
-
-build-hawq:
-	@echo "Make sure you have executed make build-image"
-	@echo "Make sure you have executed make run"
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --build"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!"; \
-	fi
-
-init-hawq:
-	@echo "Make sure you have executed make build-hawq"
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --init"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!"; \
-	fi
-
-start-hawq:
-	@echo "Make sure you have executed make init-hawq"
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --start"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!"; \
-	fi
-
-stop-hawq:
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --stop"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!"; \
-	fi
-
-status-hawq:
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \
-			-e  "USER=gpadmin" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --status"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!"; \
-	fi
-
-build-pxf:
-	@echo "Make sure you have executed make build-image"
-	@echo "Make sure you have executed make run"
-	@make -f $(THIS_MAKEFILE_PATH) pxf-namenode
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i pxf-datanode; \
-		i=$$((i+1)); \
-	done
-
-pxf-namenode:
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \
-			-e "PXF_CLASSPATH_TEMPLATE=$(PXF_CLASSPATH_TEMPLATE)" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --build"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
-	fi
-
-pxf-datanode:
-	@echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --build"; \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
-	fi
-
-init-pxf:
-	@echo "Make sure you have executed make build-pxf"
-	@make -f $(THIS_MAKEFILE_PATH) init-pxf-namenode
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i init-pxf-datanode; \
-		i=$$((i+1)); \
-	done
-
-init-pxf-namenode:
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --init"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
-	fi
-
-init-pxf-datanode:
-	@echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --init"; \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
-	fi
-
-start-pxf:
-	@echo "Make sure you have executed make init-pxf"
-	@make -f $(THIS_MAKEFILE_PATH) start-pxf-namenode
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i start-pxf-datanode; \
-		i=$$((i+1)); \
-	done
-
-start-pxf-namenode:
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --start"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
-	fi
-
-start-pxf-datanode:
-	@echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --start"; \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
-	fi
-
-stop-pxf:
-	@make -f $(THIS_MAKEFILE_PATH) stop-pxf-namenode
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i stop-pxf-datanode; \
-		i=$$((i+1)); \
-	done
-
-stop-pxf-namenode:
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --stop"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
-	fi
-
-stop-pxf-datanode:
-	@echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --stop"; \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
-	fi
-
-status-pxf:
-	@make -f $(THIS_MAKEFILE_PATH) status-pxf-namenode
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i status-pxf-datanode; \
-		i=$$((i+1)); \
-	done
-
-status-pxf-namenode:
-	@echo "Logging into ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --status"; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
-	fi
-
-status-pxf-datanode:
-	@echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker exec \
-			-e "HAWQ_HOME=$(HAWQ_HOME)" \
-			-e "NAMENODE=${CLUSTER_ID}-namenode" \
-			-u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --status"; \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
-	fi
\ No newline at end of file
diff --git a/contrib/hawq-docker/README.md b/contrib/hawq-docker/README.md
index d8e456c..4adeaaf 100644
--- a/contrib/hawq-docker/README.md
+++ b/contrib/hawq-docker/README.md
@@ -14,14 +14,16 @@ https://docs.docker.com/
 # Setup build and test environment
 * clone hawq repository
 ```
-git clone https://github.com/apache/hawq.git .
-cd hawq/contrib/hawq-docker
+git clone https://github.com/apache/incubator-hawq.git .
+cd incubator-hawq/contrib/hawq-docker
 ```
-* Build the docker images
+* Get the docker images
 ```
+  make pull (recommended)
+OR
   make build
 ``` 
-(Command `make build` is to build docker images locally.)
+(`make pull` pulls the docker images from Docker Hub, while `make build` builds them locally; `make pull` is generally faster.)
 * setup a 5 nodes virtual cluster for Apache HAWQ build and test.
 ```
 make run
@@ -49,7 +51,7 @@ sudo -u hdfs hdfs dfsadmin -report
 ```
 * clone Apache HAWQ code to /data directory
 ```
-git clone https://github.com/apache/hawq.git /data/hawq
+git clone https://github.com/apache/incubator-hawq.git /data/hawq
 ```
 * build Apache HAWQ
 ```
@@ -81,38 +83,6 @@ Type "help" for help.
 
 postgres=# 
 ```
-# Store docker images in local docker registry
-
-After your hawq environment is up and running, you could draft a local docker registry to store your hawq images locally for further usage.
-* pull and run a docker registry
-```
-docker pull registry
-docker run -d -p 127.0.0.1:5000:5000 registry
-```
-Make sure you could get the following output
-```
-curl http://localhost:5000/v2/_catalog
-{"repositories":[]}
-```
-You could push your local hawq images to local repository, let us use "centos7" as example
-```
-docker tag  hawq/hawq-test:centos7  localhost:5000/hawq-test:centos7
-docker tag  hawq/hawq-dev:centos7  localhost:5000/hawq-dev:centos7
-docker push localhost:5000/hawq-test
-docker push localhost:5000/hawq-dev
-```
-Now the local registry has images in it
-```
-curl http://localhost:5000/v2/_catalog
-{"repositories":["hawq-dev","hawq-test"]}
-```
-
-If we want to pull the images from local repo
-```
-make pull
-``` 
-
-
 # More command with this script
 ```
  Usage:
@@ -121,16 +91,7 @@ make pull
     To stop all containers:                        make stop
     To remove hdfs containers:                     make clean
     To remove all containers:                      make distclean
-    To build images locally:                       make build-image
+    To build images locally:                       make build
     To pull latest images:                         make pull
-    To build Hawq runtime:                         make build-hawq
-    To initialize Hawq on Namenode:                make init-hawq
-    To start Hawq on Namenode:                     make start-hawq
-    To stop Hawq on Namenode:                      make stop-hawq
-    To check Hawq status on Namenode:              make status-hawq
-    To build PXF runtime:                          make build-pxf
-    To initialize PXF on Namenode/Datanodes:       make init-pxf
-    To start PXF on Namenode/Datanodes:            make start-pxf
-    To stop PXF on on Namenode/Datanodes:          make stop-hawq
-    To check PXF status on Namenode/Datanodes:     make status-hawq
 ```
+
diff --git a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
index 59393bc..7905723 100644
--- a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
+++ b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
@@ -96,7 +96,6 @@ RUN mkdir -p /tmp/build/ && \
 
 # install python module 
 RUN pip --retries=50 --timeout=300 install pycrypto
-RUN pip --retries=50 --timeout=300 install cogapp
 
 # create user gpadmin since HAWQ cannot run under root
 RUN groupadd -g 1000 gpadmin && \
diff --git a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
index 1003543..58d4ef0 100644
--- a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
+++ b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
@@ -22,31 +22,18 @@ MAINTAINER Richard Guo <ri...@pivotal.io>
 # install all software we need
 RUN yum install -y epel-release && \
  yum makecache && \
- yum install -y man glog-devel jsoncpp-devel json gflags-devel lz4-devel passwd sudo tar which git mlocate links make bzip2 net-tools \
- autoconf automake libtool m4 gcc gcc-c++ gdb flex gperf maven indent \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 net-tools \
+ autoconf automake libtool m4 gcc gcc-c++ gdb bison flex cmake gperf maven indent \
  libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
  perl-ExtUtils-Embed pam-devel python-devel libcurl-devel snappy-devel \
  thrift-devel libyaml-devel libevent-devel bzip2-devel openssl-devel \
- openldap-devel readline-devel net-snmp-devel apr-devel \
+ openldap-devel protobuf-devel readline-devel net-snmp-devel apr-devel \
  libesmtp-devel python-pip json-c-devel \
- java-1.7.0-openjdk-devel lcov cmake3 \
- wget rpm-build \
- https://forensics.cert.org/centos/cert/7/x86_64//libiconv-1.15-1.el7.x86_64.rpm \
- https://forensics.cert.org/centos/cert/7/x86_64//libiconv-devel-1.15-1.el7.x86_64.rpm \
- openssh-clients openssh-server perl-JSON unzip && \
+ java-1.7.0-openjdk-devel lcov cmake \
+ openssh-clients openssh-server perl-JSON && \
  yum clean all
 
-RUN ln -s /usr/bin/cmake3 /usr/bin/cmake
-
-RUN rpm -ivh --nodeps   https://rpmfind.net/linux/centos/6.10/os/x86_64/Packages/bison-2.4.1-5.el6.x86_64.rpm
-
-RUN cd ~ && git clone https://github.com/protocolbuffers/protobuf.git && \
-	cd protobuf && git submodule update --init --recursive && \
-	./autogen.sh && ./configure && make && \
-	make check && make install && ldconfig && cd -
-
 RUN pip --retries=50 --timeout=300 install pycrypto
-RUN pip --retries=50 --timeout=300 install cogapp
 
 # OS requirement
 RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
@@ -68,8 +55,7 @@ RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
 # setup JAVA_HOME for all users
 RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
  echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
- chmod a+x /etc/profile.d/java.sh && \
- ln -s /usr/lib/jvm/java-1.7.0-openjdk-1.7.0.221-2.6.18.0.el7_6.x86_64/jre/lib/amd64/server/libjvm.so /usr/local/lib/libjvm.so
+ chmod a+x /etc/profile.d/java.sh
 
 # set USER env
 RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
@@ -87,6 +73,3 @@ RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
  chmod 0600 ~/.ssh/authorized_keys
 
 WORKDIR /data
-
-ENTRYPOINT ["bash", "/data/hawq-docker/apache-hawq/entrypoint.sh"]
-
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
index 497e6a6..ea5e22c 100644
--- a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
@@ -21,9 +21,6 @@ MAINTAINER Richard Guo <ri...@pivotal.io>
 
 USER root
 
-ARG PXF_CLASSPATH_TEMPLATE
-ARG PXF_LOG4J_PROPERTIES
-
 ## install HDP 2.5.0
 RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
  yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
@@ -31,16 +28,9 @@ RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.
 
 RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
 
-RUN touch /tmp/pxf-private.classpath && \
- touch /tmp/log4j.properties && \
- echo "$PXF_CLASSPATH_TEMPLATE" > /tmp/pxf-private.classpath && \
- echo "$PXF_LOG4J_PROPERTIES" > /tmp/pxf-log4j.properties
-
 COPY conf/* /etc/hadoop/conf/
 
 COPY entrypoint.sh /usr/bin/entrypoint.sh
-COPY service-hawq.sh /usr/bin/service-hawq.sh
-COPY service-pxf.sh /usr/bin/service-pxf.sh
 COPY start-hdfs.sh /usr/bin/start-hdfs.sh
 
 USER gpadmin
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
index 69ce7c9..afc37fc 100644
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
@@ -19,6 +19,6 @@
 <configuration>
 	<property>
 		<name>fs.defaultFS</name>
-		<value>hdfs://@hdfs.namenode@:8020</value>
+		<value>hdfs://${hdfs.namenode}:8020</value>
 	</property>
 </configuration>
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
index 308eb1e..abdc508 100755
--- a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
@@ -27,9 +27,6 @@ if [ ! -f /etc/profile.d/hadoop.sh ]; then
   sudo chmod a+x /etc/profile.d/hadoop.sh
 fi
 
-sudo chmod 777 /etc/hadoop/conf/core-site.xml
-sudo sed "s/@hdfs.namenode@/$NAMENODE/g" -i /etc/hadoop/conf/core-site.xml
-
 sudo start-hdfs.sh
 sudo sysctl -p
 
diff --git a/contrib/hawq-hadoop/Makefile b/contrib/hawq-hadoop/Makefile
index d017a16..aa582c5 100644
--- a/contrib/hawq-hadoop/Makefile
+++ b/contrib/hawq-hadoop/Makefile
@@ -40,11 +40,7 @@ all: $(DEFAULTTARGET)
 
 $(DEFAULTTARGET):
 ifdef MAVEN
-ifeq ($(shell java -version 2>&1 | grep 1.7.0 > /dev/null; printf $$?),0)
-	$(MAVEN) package -DskipTests -Dhttps.protocols=TLSv1.2
-else
 	$(MAVEN) package -DskipTests
-endif
 else
 	@$(missing) mvn $< $@
 endif
diff --git a/contrib/hawq-package/README b/contrib/hawq-package/README
index bcd2add..8fb34fd 100644
--- a/contrib/hawq-package/README
+++ b/contrib/hawq-package/README
@@ -20,9 +20,9 @@ Build HAWQ rpm package tarball steps:
 
 1. Prepare a HAWQ source code tarball for rpm building.
    a) Make sure the source code tarball name format is: 
-      apache-hawq-src-%{hawq_version}-.tar.gz
+      apache-hawq-src-%{hawq_version}-incubating.tar.gz
    b) Make sure the extracted directory name format is:
-      apache-hawq-src-%{hawq_version}
+      apache-hawq-src-%{hawq_version}-incubating
    c) Put the HAWQ source tarball to %{top_dir}/contrib/hawq-package folder.
       Or use "HAWQ_SOURCE_TARBALL_PATH" to specify the find path of HAWQ source tarball.
 
@@ -35,4 +35,4 @@ Build HAWQ rpm package tarball steps:
    You can set environment variable "HAWQ_RELEASE_VERSION" to specify HAWQ version.
 
 5. You can find HAWQ/PXF/Ranger-plugin rpms tarball as:
-   %{top_dir}/contrib/hawq-package/apache-hawq-rpm-%{hawq_version}.tar.gz 
+   %{top_dir}/contrib/hawq-package/apache-hawq-rpm-%{hawq_version}-incubating.tar.gz 
diff --git a/contrib/hawq-package/build_hawq_rpm.sh b/contrib/hawq-package/build_hawq_rpm.sh
index 7b569b4..c3c3309 100755
--- a/contrib/hawq-package/build_hawq_rpm.sh
+++ b/contrib/hawq-package/build_hawq_rpm.sh
@@ -45,7 +45,7 @@ mkdir -p rpmbuild/BUILD rpmbuild/RPMS rpmbuild/SOURCES rpmbuild/SPECS rpmbuild/S
 # Copy HAWQ RPM configuration file for the build
 cp hawq.spec rpmbuild/SPECS/
 
-HAWQ_SOURCE_TARBALL_FILE=apache-hawq-src-${HAWQ_RELEASE_VERSION}.tar.gz
+HAWQ_SOURCE_TARBALL_FILE=apache-hawq-src-${HAWQ_RELEASE_VERSION}-incubating.tar.gz
 
 # Get PATH where to find HAWQ source code tarball
 if [ -z ${HAWQ_SOURCE_TARBALL_PATH} ]; then
diff --git a/contrib/hawq-package/hawq.spec b/contrib/hawq-package/hawq.spec
index e33eb4a..0e335bd 100644
--- a/contrib/hawq-package/hawq.spec
+++ b/contrib/hawq-package/hawq.spec
@@ -25,15 +25,15 @@
 %define    installdir             /usr/local/%{name}
 
 Name:       apache-hawq
-Summary:    Hadoop Native SQL powered by Apache HAWQ
+Summary:    Hadoop Native SQL powered by Apache HAWQ (incubating)
 Version:    %{hawq_version}
 Release:    %{rpm_os_version}
 License:    ASL 2.0
 Group:      Applications/Databases
-URL:        http://hawq.apache.org
+URL:        http://hawq.incubator.apache.org
 Prefix:     /usr/local
 BuildArch:  %{arch}
-SOURCE0 :   apache-hawq-src-%{hawq_version}.tar.gz
+SOURCE0 :   apache-hawq-src-%{hawq_version}-incubating.tar.gz
 Requires:   libgsasl, krb5-libs, libicu, protobuf >= 2.5.0, json-c >= 0.9, net-snmp-libs, thrift >= 0.9.1, boost >= 1.53.0
 %if %{rpm_os_version} == el6
 Requires: openssl
@@ -45,7 +45,7 @@ Requires(pre): shadow-utils
 AutoReqProv:    no
 
 %description
-Apache HAWQ combines exceptional MPP-based analytics
+Apache HAWQ (incubating) combines exceptional MPP-based analytics
 performance, robust ANSI SQL compliance, Hadoop ecosystem
 integration and manageability, and flexible data-store format
 support, all natively in Hadoop, no connectors required.
@@ -56,7 +56,7 @@ source Greenplum® Database and PostgreSQL, HAWQ enables you to
 swiftly and interactively query Hadoop data, natively via HDFS.
 
 %prep
-%setup -n %{name}-src-%{version}
+%setup -n %{name}-src-%{version}-incubating
 
 %build
 export CFLAGS="-O3 -g"
diff --git a/contrib/hawq-package/make_rpm_tarball.sh b/contrib/hawq-package/make_rpm_tarball.sh
index 33ca573..8501534 100755
--- a/contrib/hawq-package/make_rpm_tarball.sh
+++ b/contrib/hawq-package/make_rpm_tarball.sh
@@ -69,15 +69,15 @@ echo "Copied all the HAWQ/PXF/Range-plugin rpm packages."
 ls ${RPM_PKG_DIR}/
 
 # Make tarball for all the HAWQ/PXF/RANGER rpms
-tar czvf apache-hawq-rpm-${HAWQ_RELEASE_VERSION}.tar.gz  hawq_rpm_packages
+tar czvf apache-hawq-rpm-${HAWQ_RELEASE_VERSION}-incubating.tar.gz  hawq_rpm_packages
 if [ $? != 0 ]; then
     echo "Make HAWQ/PXF/Ranger-plugin rpm tarball failed."
     exit $?
 else
     echo "Make HAWQ/PXF/Ranger-plugin rpm tarball successfully."
     echo "You can find the rpm binary tarball at:"
-    echo "${CUR_DIR}/apache-hawq-rpm-${HAWQ_RELEASE_VERSION}.tar.gz"
-    ls -l apache-hawq-rpm-${HAWQ_RELEASE_VERSION}.tar.gz
+    echo "${CUR_DIR}/apache-hawq-rpm-${HAWQ_RELEASE_VERSION}-incubating.tar.gz"
+    ls -l apache-hawq-rpm-${HAWQ_RELEASE_VERSION}-incubating.tar.gz
 fi
 
 exit 0
diff --git a/contrib/magma/Makefile b/contrib/magma/Makefile
new file mode 100644
index 0000000..4a8639b
--- /dev/null
+++ b/contrib/magma/Makefile
@@ -0,0 +1,17 @@
+MODULE_big = magma
+OBJS       = magma.o
+
+ifdef USE_PGXS
+PGXS := $(shell pg_config --pgxs)
+include $(PGXS)
+else
+subdir = contrib/magma
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+override CFLAGS += -lmagma-client -lstorage -lstorage-magma-format -lunivplan -ljson-c
+
+override CPPFLAGS := $(CPPFLAGS) $(LIBASAN)
+override CFLAGS := $(CFLAGS) $(LIBASAN)
+
+endif
diff --git a/contrib/magma/magma.c b/contrib/magma/magma.c
new file mode 100644
index 0000000..dc60ab7
--- /dev/null
+++ b/contrib/magma/magma.c
@@ -0,0 +1,3885 @@
+#include <json-c/json.h>
+
+#include "c.h"
+#include "port.h"
+#include "postgres.h"
+#include "fmgr.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+
+#include "access/extprotocol.h"
+#include "access/filesplit.h"
+#include "access/fileam.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "access/htup.h"
+#include "access/plugstorage.h"
+#include "access/tupdesc.h"
+#include "access/transam.h"
+#include "catalog/namespace.h"
+#include "catalog/pg_exttable.h"
+#include "catalog/pg_attribute.h"
+#include "cdb/cdbdatalocality.h"
+#include "cdb/cdbhash.h"
+#include "cdb/cdbvars.h"
+#include "commands/copy.h"
+#include "commands/defrem.h"
+#include "commands/dbcommands.h"
+#include "mb/pg_wchar.h"
+#include "nodes/makefuncs.h"
+#include "nodes/pg_list.h"
+#include "nodes/plannodes.h"
+#include "optimizer/newPlanner.h"
+#include "parser/parse_type.h"
+#include "postmaster/identity.h"
+#include "utils/acl.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/datetime.h"
+#include "utils/elog.h"
+#include "utils/fmgroids.h"
+#include "utils/formatting.h"
+#include "utils/hawq_type_mapping.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/numeric.h"
+#include "utils/rel.h"
+#include "utils/relcache.h"
+#include "utils/uri.h"
+
+#include "storage/cwrapper/magma-format-c.h"
+#include "magma/cwrapper/magma-client-c.h"
+#include "univplan/cwrapper/univplan-c.h"
+
+/*
+ * Do the module magic dance
+ */
+PG_MODULE_MAGIC;
+
+/*
+ * Validators for magma protocol in pluggable storage
+ */
+PG_FUNCTION_INFO_V1(magma_protocol_blocklocation);
+PG_FUNCTION_INFO_V1(magma_protocol_tablesize);
+PG_FUNCTION_INFO_V1(magma_protocol_databasesize);
+PG_FUNCTION_INFO_V1(magma_protocol_validate);
+
+/*
+ * Validators for magma format in pluggable storage
+ */
+PG_FUNCTION_INFO_V1(magma_validate_interfaces);
+PG_FUNCTION_INFO_V1(magma_validate_options);
+PG_FUNCTION_INFO_V1(magma_validate_encodings);
+PG_FUNCTION_INFO_V1(magma_validate_datatypes);
+
+/*
+ * Accessors for magma format in pluggable storage
+ */
+PG_FUNCTION_INFO_V1(magma_createtable);
+PG_FUNCTION_INFO_V1(magma_droptable);
+PG_FUNCTION_INFO_V1(magma_beginscan);
+PG_FUNCTION_INFO_V1(magma_getnext_init);
+PG_FUNCTION_INFO_V1(magma_getnext);
+PG_FUNCTION_INFO_V1(magma_rescan);
+PG_FUNCTION_INFO_V1(magma_endscan);
+PG_FUNCTION_INFO_V1(magma_stopscan);
+PG_FUNCTION_INFO_V1(magma_begindelete);
+PG_FUNCTION_INFO_V1(magma_delete);
+PG_FUNCTION_INFO_V1(magma_enddelete);
+PG_FUNCTION_INFO_V1(magma_beginupdate);
+PG_FUNCTION_INFO_V1(magma_update);
+PG_FUNCTION_INFO_V1(magma_endupdate);
+PG_FUNCTION_INFO_V1(magma_insert_init);
+PG_FUNCTION_INFO_V1(magma_insert);
+PG_FUNCTION_INFO_V1(magma_insert_finish);
+
+/*
+ * Transaction for magma format
+ */
+PG_FUNCTION_INFO_V1(magma_transaction);
+
+/*
+ * Definitions of validators for magma protocol in pluggable storage
+ */
+Datum magma_protocol_blocklocation(PG_FUNCTION_ARGS);
+Datum magma_protocol_validate(PG_FUNCTION_ARGS);
+Datum magma_getstatus(PG_FUNCTION_ARGS);
+/*
+ * Definitions of validators for magma format in pluggable storage
+ */
+Datum magma_validate_interfaces(PG_FUNCTION_ARGS);
+Datum magma_validate_options(PG_FUNCTION_ARGS);
+Datum magma_validate_encodings(PG_FUNCTION_ARGS);
+Datum magma_validate_datatypes(PG_FUNCTION_ARGS);
+
+/*
+ * Definitions of accessors for magma format in pluggable storage
+ */
+Datum magma_createtable(PG_FUNCTION_ARGS);
+Datum magma_droptable(PG_FUNCTION_ARGS);
+Datum magma_beginscan(PG_FUNCTION_ARGS);
+Datum magma_getnext_init(PG_FUNCTION_ARGS);
+Datum magma_getnext(PG_FUNCTION_ARGS);
+Datum magma_rescan(PG_FUNCTION_ARGS);
+Datum magma_endscan(PG_FUNCTION_ARGS);
+Datum magma_stopscan(PG_FUNCTION_ARGS);
+Datum magma_begindelete(PG_FUNCTION_ARGS);
+Datum magma_delete(PG_FUNCTION_ARGS);
+Datum magma_enddelete(PG_FUNCTION_ARGS);
+Datum magma_beginupdate(PG_FUNCTION_ARGS);
+Datum magma_update(PG_FUNCTION_ARGS);
+Datum magma_endupdate(PG_FUNCTION_ARGS);
+Datum magma_insert_init(PG_FUNCTION_ARGS);
+Datum magma_insert(PG_FUNCTION_ARGS);
+Datum magma_insert_finish(PG_FUNCTION_ARGS);
+
+/*
+ * Definitions of accessors for magma format index in pluggable storage
+ */
+Datum magma_createindex(PG_FUNCTION_ARGS);
+Datum magma_dropindex(PG_FUNCTION_ARGS);
+Datum magma_reindex_index(PG_FUNCTION_ARGS);
+
+/*
+ * Definition of transaction for magma format
+ */
+Datum magma_transaction(PG_FUNCTION_ARGS);
+
+typedef struct {
+  int64_t second;
+  int64_t nanosecond;
+} TimestampType;
+
+typedef struct MagmaTidC {
+  uint64_t rowid;
+  uint16_t rangeid;
+} MagmaTidC;
+
+typedef struct GlobalFormatUserData {
+  MagmaFormatC *fmt;
+  char *dbname;
+  char *schemaname;
+  char *tablename;
+  bool isMagmatp;
+  int *colIndexes;
+  bool *colIsNulls;
+
+  char **colNames;
+  int *colDatatypes;
+  int64_t *colDatatypeMods;
+  int32_t numberOfColumns;
+  char **colRawValues;
+  Datum *colValues;
+  uint64_t *colValLength;
+  bool *colToReads;
+  char *colRawTid;
+  MagmaTidC colTid;
+
+  // for insert/update/delete
+  TimestampType *colTimestamp;
+} GlobalFormatUserData;
+
+static MagmaClientC* global_magma_client;
+
+/*
+ * Utility functions for magma in pluggable storage
+ */
+static void init_common_plan_context(CommonPlanContext *ctx);
+static void free_common_plan_context(CommonPlanContext *ctx);
+static FmgrInfo *get_magma_function(char *formatter_name, char *function_name);
+static void get_magma_category_info(char *fmtoptstr, bool *isexternal);
+static void get_magma_scan_functions(char *formatter_name,
+                                     FileScanDesc file_scan_desc);
+static void get_magma_insert_functions(char *formatter_name,
+                                       ExternalInsertDesc ext_insert_desc);
+static void get_magma_delete_functions(char *formatter_name,
+                                       ExternalInsertDesc ext_delete_desc);
+static void get_magma_update_functions(char *formatter_name,
+                                       ExternalInsertDesc ext_update_desc);
+
+static MagmaFormatC *create_magma_formatter_instance(List *fmt_opts_defelem,
+                                                     char *serializeSchema,
+                                                     int serializeSchemaLen,
+                                                     int fmt_encoding,
+                                                     char *formatterName,
+                                                     int rangeNum);
+
+static MagmaClientC *create_magma_client_instance();
+static void init_magma_format_user_data_for_read(
+    TupleDesc tup_desc, GlobalFormatUserData *user_data);
+static void init_magma_format_user_data_for_write(
+    TupleDesc tup_desc, GlobalFormatUserData *user_data, Relation relation);
+
+static void build_options_in_json(char *serializeSchema, int serializeSchemaLen,
+                                  List *fmt_opts_defelem, int encoding, int rangeNum,
+                                  char *formatterName, char **json_str);
+static void build_magma_tuple_descrition_for_read(
+    Plan *plan, Relation relation, GlobalFormatUserData *user_data, bool skipTid);
+
+static void magma_scan_error_callback(void *arg);
+
+static List *magma_parse_format_string(char *fmtname, char **fmtstr);
+static char *magma_strtokx2(const char *s, const char *whitespace,
+                            const char *delim, const char *quote, char escape,
+                            bool e_strings, bool del_quotes, int encoding);
+static void magma_strip_quotes(char *source, char quote, char escape,
+                               int encoding);
+
+static void magma_check_result(MagmaClientC **client);
+
+static bool checkUnsupportedDataTypeMagma(int32_t hawqTypeID);
+
+int32_t map_hawq_type_to_magma_type(int32_t hawqTypeID, bool isMagmatp);
+
+char *search_hostname_by_ipaddr(const char *ipaddr);
+
+static void getHostNameByIp(const char *ipaddr, char *hostname);
+
+static void magma_clear(PlugStorage ps, bool clearSlot) {
+  FileScanDesc fsd = ps->ps_file_scan_desc;
+  GlobalFormatUserData *user_data = (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+  TupleTableSlot *slot = ps->ps_tuple_table_slot;
+
+  if (user_data->fmt) {
+    MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+    if (e->errCode == ERRCODE_SUCCESSFUL_COMPLETION) {
+      MagmaFormatEndScanMagmaFormatC(user_data->fmt);
+      e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+      if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+        ereport(ERROR, (errcode(e->errCode), errmsg("MAGMA:%s", e->errMessage)));
+      }
+
+      MagmaFormatFreeMagmaFormatC(&(user_data->fmt));
+
+      // when clearSlot is set, release the per-scan buffers and clear the slot
+      if (clearSlot) {
+        pfree(user_data->colRawValues);
+        pfree(user_data->colValues);
+        pfree(user_data->colToReads);
+        pfree(user_data->colValLength);
+        for (int i = 0; i < user_data->numberOfColumns; ++i)
+          pfree(user_data->colNames[i]);
+        pfree(user_data->colNames);
+        pfree(user_data->colDatatypes);
+        pfree(user_data->colDatatypeMods);
+        pfree(user_data->colIsNulls);
+        pfree(user_data);
+        fsd->fs_ps_user_data = NULL;
+
+        ps->ps_has_tuple = false;
+        slot->PRIVATE_tts_values = NULL;
+        ExecClearTuple(slot);
+      }
+    } else {
+      ereport(ERROR, (errcode(e->errCode), errmsg("MAGMA:%s", e->errMessage)));
+    }
+  }
+}
+
+static inline void ConvertTidToCtidAndRangeid(const MagmaTidC tid,
+                                                ItemPointerData *ctid,
+                                                uint32_t *tts_rangeid) {
+  // MagmaTidC tidVal = *(MagmaTidC *)DatumGetPointer(tid);
+  /* put low 48 bits rowid in ctid and high 16 bits rowid in tts_rangeid. */
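+  /* Worked example (illustrative values only): rowid 0x1122334455667788 with
+   * rangeid 0xABCD yields bi_hi 0x3344, bi_lo 0x5566, ip_posid 0x7788 and
+   * tts_rangeid 0x1122ABCD (top 16 bits of rowid | rangeid). */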
+  ctid->ip_blkid.bi_hi = (uint16) (tid.rowid >> 32);
+  ctid->ip_blkid.bi_lo = (uint16) (tid.rowid >> 16);
+  ctid->ip_posid = tid.rowid;
+  *tts_rangeid = ((uint32)(tid.rowid >> 32) & 0xFFFF0000) | (uint32)tid.rangeid;
+  return;
+}
+
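+/* Note: this assumes the rangeIds of the incoming files form a dense
+ * 0..length-1 sequence, so each blocklocation_file can be placed at list
+ * index rangeId of the sorted copy. */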
+static inline List *SortMagmaFilesByRangeId(List *files, int32_t length) {
+  List *sortedFiles = list_copy(files);
+  ListCell *cell;
+  blocklocation_file *blf;
+  uint16_t rangeId;
+  for (int i = 0; i < length; i++) {
+    cell = list_nth_cell(files, i);
+    blf = (blocklocation_file *)(cell->data.ptr_value);
+    Assert(blf->block_num > 0 && blf->locations);
+    rangeId = blf->locations[0].rangeId;
+    list_nth_replace(sortedFiles, rangeId, blf);
+  }
+  list_free(files);
+  return sortedFiles;
+}
+
+/*
+ * Get magma node status
+ */
+Datum magma_getstatus(PG_FUNCTION_ARGS) {
+  elog(DEBUG1, "magma_getstatus begin");
+  ExtProtocolMagmaInfo magmadata =
+      palloc0(sizeof(ExtProtocolMagmaStatusData));
+  if (magmadata == NULL) {
+    elog(ERROR, "magma_getstatus: failed to allocate new space");
+  }
+  magmadata->type = T_ExtProtocolMagmaStatusData;
+  fcinfo->resultinfo = magmadata;
+
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "magma_getstatus failed to connect to magma service");
+  }
+  magmadata->magmaNodes = MagmaClientC_GetMagmaStatus(client, &(magmadata->size));
+  magma_check_result(&client);
+
+  elog(DEBUG1, "magma_getstatus end");
+  PG_RETURN_VOID();
+}
+
+/*
+ * Implementation of blocklocation for magma protocol in pluggable storage
+ */
+Datum magma_protocol_blocklocation(PG_FUNCTION_ARGS) {
+  elog(DEBUG3, "magma_protocol_blocklocation begin");
+  /*
+   * Step 1. prepare instances
+   */
+  /* Build the result instance and basic properties */
+  ExtProtocolBlockLocation bldata =
+      palloc0(sizeof(ExtProtocolBlockLocationData));
+
+  if (bldata == NULL) {
+    elog(ERROR,
+         "magma_protocol_blocklocation: failed to allocate new space");
+  }
+  bldata->type = T_ExtProtocolBlockLocationData;
+  fcinfo->resultinfo = bldata;
+
+  /* Build validator data */
+  ExtProtocolValidator pvalidator_data =
+      (ExtProtocolValidator)(fcinfo->context);
+  List *fmt_opts = pvalidator_data->format_opts;
+  char *dbname = pvalidator_data->dbname;
+  char *schemaname = pvalidator_data->schemaname;
+  char *tablename = pvalidator_data->tablename;
+  bool useClientCacheDirectly = pvalidator_data->useClientCacheDirectly;
+
+  MagmaSnapshot *snapshot = &(pvalidator_data->snapshot);
+
+  char *format_str = pstrdup((char *)strVal(linitial(fmt_opts)));
+  /*
+   * Step 2. get table schema and range distribution
+   */
+  char *fmt_name = NULL;
+  List *l = magma_parse_format_string(format_str, &fmt_name);
+  pfree(format_str);
+
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service");
+  }
+
+  int16_t tableType = 0;
+  if (pg_strncasecmp(
+      fmt_name, MAGMA_STORAGE_TYPE_TP, MAGMA_STORAGE_TYPE_TP_LEN) == 0) {
+    tableType = MAGMACLIENTC_TABLETYPE_TP;
+  } else if (pg_strncasecmp(fmt_name, MAGMA_STORAGE_TYPE_AP,
+                            MAGMA_STORAGE_TYPE_AP_LEN) == 0) {
+    tableType = MAGMACLIENTC_TABLETYPE_AP;
+  } else {
+    elog(ERROR,
+         "magma_get_blocklocation: failed to recognize table format type: [%s]",
+         fmt_name);
+  }
+  MagmaClientC_SetupTableInfo(client, dbname, schemaname, tablename, tableType);
+  MagmaTablePtr table = MagmaClientC_FetchTable(client, snapshot, useClientCacheDirectly);
+  magma_check_result(&client);
+
+  elog(LOG, "magma_protocol_blocklocation pass fetch table");
+
+  /*
+   * Step 3. map ranges to block locations
+   */
+  bldata->serializeSchemaLen = MagmaClientC_MTGetSerializeSchemaLen(table);
+  bldata->serializeSchema = palloc0(bldata->serializeSchemaLen);
+  memcpy(bldata->serializeSchema, MagmaClientC_MTGetSerializeSchema(table),
+         bldata->serializeSchemaLen);
+
+  bldata->files = NIL;
+  blocklocation_file *blf = NULL;
+
+  // build block location files which reference cached range location
+  MagmaRangeDistPtr rangeDist = MagmaClientC_FetchRangeDist(client);
+  magma_check_result(&client);
+
+  uint32_t rgNum = MagmaClientC_RDGetNumOfRgs(rangeDist);
+  int32_t totalGroupNum = 0;
+  elog(DEBUG3, "rg num %d", rgNum);
+  for ( int rgIndex = 0 ; rgIndex < rgNum ; ++rgIndex ) {
+    uint32_t rangeNum = MagmaClientC_RDGetNumOfRangesByRg(rangeDist, rgIndex);
+    elog(DEBUG3, "rangeNum num %d", rangeNum);
+    for ( int rangeIndex = 0 ; rangeIndex < rangeNum ; ++rangeIndex ) {
+      // create block location file instance
+      blocklocation_file *blf = palloc0(sizeof(blocklocation_file));
+      blf->block_num = 1;
+      blf->file_uri = NULL;  // not used field, set NULL to make it tidy
+      blf->locations = palloc0(sizeof(BlockLocation));
+      BlockLocation *bl = &(blf->locations[0]);
+      MagmaRangePtr rangePtr = MagmaClientC_RDGetRangeByRg(rangeDist,
+                                                           rgIndex,
+                                                           rangeIndex);
+      bl->replicaGroupId = MagmaClientC_RangeGetLeaderRgId(rangePtr);
+      bl->rangeId = MagmaClientC_RangeGetRangeId(rangePtr);
+      bl->length = 1;      // always one range as one block
+      bl->offset = 0;      // no offet
+      bl->corrupt = 0;     // no corrupt setting
+      bl->numOfNodes = 1;  // we save leader node only
+      bl->hosts = palloc0(sizeof(char *) * bl->numOfNodes);
+      bl->names = palloc0(sizeof(char *) * bl->numOfNodes);
+      bl->topologyPaths = palloc0(sizeof(char *) * bl->numOfNodes);
+      bl->hosts[0] = search_hostname_by_ipaddr(
+          MagmaClientC_RangeGetLeaderRgAddress(rangePtr));
+      bl->names[0] = pstrdup(MagmaClientC_RangeGetLeaderRgFullAddress(rangePtr));
+      bl->topologyPaths[0] = bl->names[0];
+
+      // connect block location file instance to the list
+      bldata->files = lappend(bldata->files, (void *)blf);
+      totalGroupNum++;
+    }
+  }
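+
+  /* Each range is exposed as one logical block whose single "host" is the
+   * range's leader replica group, so downstream data locality decisions
+   * see only the leader node. */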
+
+  bldata->files = SortMagmaFilesByRangeId(bldata->files, totalGroupNum);
+
+  /*
+   * 4. return range locations
+   */
+  elog(DEBUG3, "magma_protocol_blocklocation pass");
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * Implementation of tablesize calculation for magma protocol in pluggable storage
+ */
+Datum magma_protocol_tablesize(PG_FUNCTION_ARGS) {
+  elog(DEBUG3, "magma_protocol_tablesize begin");
+  /*
+   * Step 1. prepare instances
+   */
+  /* Build the result instance and basic properties */
+  ExtProtocolTableSize tsdata =
+      palloc0(sizeof(ExtProtocolTableSizeData));
+
+  if (tsdata == NULL) {
+    elog(ERROR,
+         "magma_protocol_blocklocation: failed to allocate new space");
+  }
+  tsdata->type = T_ExtProtocolTableSizeData;
+  fcinfo->resultinfo = tsdata;
+
+  /* Build validator data */
+  ExtProtocolValidator pvalidator_data =
+      (ExtProtocolValidator)(fcinfo->context);
+  List *fmt_opts = pvalidator_data->format_opts;
+  char *dbname = pvalidator_data->dbname;
+  char *schemaname = pvalidator_data->schemaname;
+  char *tablename = pvalidator_data->tablename;
+
+  MagmaSnapshot *snapshot = &(pvalidator_data->snapshot);
+
+  char *format_str = pstrdup((char *)strVal(linitial(fmt_opts)));
+  /*
+   * Step 2. get table size
+   */
+  char *fmt_name = NULL;
+  List *l = magma_parse_format_string(format_str, &fmt_name);
+  pfree(format_str);
+
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service");
+  }
+
+  int16_t tableType = 0;
+  if (pg_strncasecmp(
+      fmt_name, MAGMA_STORAGE_TYPE_TP, MAGMA_STORAGE_TYPE_TP_LEN) == 0) {
+    tableType = MAGMACLIENTC_TABLETYPE_TP;
+  } else if (pg_strncasecmp(fmt_name, MAGMA_STORAGE_TYPE_AP,
+                            MAGMA_STORAGE_TYPE_AP_LEN) == 0) {
+    tableType = MAGMACLIENTC_TABLETYPE_AP;
+  } else {
+    elog(ERROR,
+         "magma_get_tablesize: failed to recognize table format type: [%s]",
+         fmt_name);
+  }
+  MagmaClientC_SetupTableInfo(client, dbname, schemaname, tablename, tableType);
+
+  // set size of table in tp type to zero.
+  if (tableType == MAGMACLIENTC_TABLETYPE_AP) {
+    tsdata->tablesize = MagmaClientC_GetTableSize(client, snapshot);
+  } else {
+    tsdata->tablesize = 0;
+  }
+
+  elog(LOG,"table size in magma.c is %llu", tsdata->tablesize);
+  magma_check_result(&client);
+
+  elog(LOG, "magma_protocol_tablesize psss get tablesize.");
+
+  elog(DEBUG3, "magma_protocol_tablesize pass");
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * Implementation of database size calculation for magma protocol in pluggable storage
+ */
+
+Datum magma_protocol_databasesize(PG_FUNCTION_ARGS) {
+  elog(DEBUG3, "magma_protocol_databasesize begin");
+  /*
+   * Step 1. prepare instances
+   */
+  /* Build the result instance and basic properties */
+  ExtProtocolDatabaseSize dbsdata =
+      palloc0(sizeof(ExtProtocolDatabaseSizeData));
+
+  if (dbsdata == NULL) {
+    elog(ERROR,
+         "magma_protocol_databasesize: failed to allocate new space");
+  }
+  dbsdata->type = T_ExtProtocolDatabaseSizeData;
+  fcinfo->resultinfo = dbsdata;
+
+  /* Build validator data */
+  ExtProtocolValidator pvalidator_data =
+      (ExtProtocolValidator)(fcinfo->context);
+  char *dbname = pvalidator_data->dbname;
+
+  MagmaSnapshot *snapshot = &(pvalidator_data->snapshot);
+
+  /*
+   * Step 2. get database size
+   */
+
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service");
+  }
+
+  MagmaClientC_SetupDatabaseInfo(client, dbname);
+  dbsdata->dbsize = MagmaClientC_GetDatabaseSize(client, snapshot);
+  elog(LOG,"dbsize in magma.c is %llu", dbsdata->dbsize);
+  magma_check_result(&client);
+
+  elog(LOG, "magma_protocol_databasesize psss get databasesize.");
+
+  elog(DEBUG3, "magma_protocol_tablesize pass");
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * Implementation of validators for magma protocol in pluggable storage
+ */
+
+Datum magma_protocol_validate(PG_FUNCTION_ARGS) {
+  elog(DEBUG3, "magma_protocol_validate begin");
+
+  /* Check action to be performed */
+  ExtProtocolValidatorData *pvalidator_data =
+      (ExtProtocolValidatorData *)(fcinfo->context);
+  /* Validate formatter options, url, and create directory in magma */
+
+  List *locs = pvalidator_data->url_list;
+
+  ListCell *cell;
+  foreach (cell, locs) {
+    char *url = (char *)strVal(lfirst(cell));
+    Uri *uri = ParseExternalTableUri(url);
+    if (uri == NULL) {
+      elog(ERROR,
+           "magma_protocol_validate :"
+           "invalid URI encountered %s",
+           url);
+    }
+    if (uri->protocol != URI_MAGMA) {
+      elog(ERROR,
+           "magma_protocol_validate :"
+           "invalid URI protocol encountered in %s, "
+           "magma:// protocol is required",
+           url);
+    }
+    FreeExternalTableUri(uri);
+  }
+
+  elog(DEBUG3, "magma_protocol_validate pass");
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * Implementation of validators for magma format in pluggable storage
+ */
+
+/*
+ * void
+ * magma_validate_interfaces(char *formatName)
+ */
+Datum magma_validate_interfaces(PG_FUNCTION_ARGS) {
+  PlugStorageValidator psv_interface = (PlugStorageValidator)(fcinfo->context);
+
+  if (pg_strncasecmp(psv_interface->format_name, "magma",
+                     sizeof("magma") - 1) != 0) {
+    ereport(ERROR,
+            (errcode(ERRCODE_SYNTAX_ERROR),
+             errmsg("magma_validate_interfaces : incorrect format name \'%s\'",
+                    psv_interface->format_name)));
+  }
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * void
+ * magma_validate_options(List *formatOptions,
+ *                     char *formatStr,
+ *                     bool isWritable)
+ */
+Datum magma_validate_options(PG_FUNCTION_ARGS) {
+  PlugStorageValidator psv = (PlugStorageValidator)(fcinfo->context);
+
+  List *format_opts = psv->format_opts;
+  char *format_str = psv->format_str;
+  // bool is_writable  = psv->is_writable;
+
+  char *formatter = NULL;
+  char *category = NULL;
+  // char *bucketnum = NULL;
+
+  ListCell *opt;
+
+  const int maxlen = 8 * 1024 - 1;
+  int len = 0;
+
+  foreach (opt, format_opts) {
+    DefElem *defel = (DefElem *)lfirst(opt);
+    char *key = defel->defname;
+    bool need_free_value = false;
+    char *val = (char *)defGetString(defel, &need_free_value);
+
+    /* check formatter */
+    if (strncasecmp(key, "formatter", strlen("formatter")) == 0) {
+      char *formatter_values[] = {"magmaap", "magmatp"};
+      checkPlugStorageFormatOption(&formatter, key, val, true, 2,
+                                   formatter_values);
+    }
+
+    /* check category */
+    if (strncasecmp(key, "category", strlen("category")) == 0) {
+      char *category_values[] = {"internal", "external"};
+      checkPlugStorageFormatOption(&category, key, val, true, 2,
+                                   category_values);
+    }
+
+    if (strncasecmp(key, "bucketnum", strlen("bucketnum")) == 0) {
+      ereport(ERROR,
+             (errcode(ERRCODE_SYNTAX_ERROR),
+              errmsg("bucketnum of magmatp/magmaap table are not supported by "
+                     "user defined yet"),
+              errOmitLocation(true)));
+    }
+
+    if (strncasecmp(key, "formatter", strlen("formatter")) &&
+        strncasecmp(key, "category", strlen("category")) &&
+        strncasecmp(key, "bucketnum", strlen("bucketnum"))) {
+      ereport(ERROR,
+              (errcode(ERRCODE_SYNTAX_ERROR),
+               errmsg("format options for magma table must be formatter"),
+               errOmitLocation(true)));
+    }
+
+    sprintf((char *)format_str + len, "%s '%s' ", key, val);
+    len += strlen(key) + strlen(val) + 4;
+
+    if (need_free_value) {
+      pfree(val);
+      val = NULL;
+    }
+
+    AssertImply(need_free_value, NULL == val);
+
+    if (len > maxlen) {
+      ereport(
+          ERROR,
+          (errcode(ERRCODE_SYNTAX_ERROR),
+           errmsg("format options must be less than %d bytes in size", maxlen),
+           errOmitLocation(true)));
+    }
+  }
+
+  if (!formatter) {
+    ereport(ERROR,
+            (errcode(ERRCODE_SYNTAX_ERROR),
+             errmsg("no formatter function specified"), errOmitLocation(true)));
+  }
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * void
+ * magma_validate_encodings(char *encodingName)
+ */
+Datum magma_validate_encodings(PG_FUNCTION_ARGS) {
+  PlugStorageValidator psv = (PlugStorageValidator)(fcinfo->context);
+  char *encoding_name = psv->encoding_name;
+
+  if (strncasecmp(encoding_name, "SQL_ASCII", strlen("SQL_ASCII"))) {
+    ereport(
+        ERROR,
+        (errcode(ERRCODE_SYNTAX_ERROR),
+         errmsg("\"%s\" is not a valid encoding for an external table with "
+                "magma. The encoding for an external table with magma must "
+                "be SQL_ASCII.",
+                encoding_name),
+         errOmitLocation(true)));
+  }
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * void
+ * magma_validate_datatypes(TupleDesc tupDesc)
+ */
+Datum magma_validate_datatypes(PG_FUNCTION_ARGS) {
+  PlugStorageValidator psv = (PlugStorageValidator)(fcinfo->context);
+  TupleDesc tup_desc = psv->tuple_desc;
+
+  for (int i = 0; i < tup_desc->natts; ++i) {
+    int32_t datatype =
+        (int32_t)(((Form_pg_attribute)(tup_desc->attrs[i]))->atttypid);
+
+    if (checkUnsupportedDataTypeMagma(datatype)) {
+      ereport(
+          ERROR,
+          (errcode(ERRCODE_SYNTAX_ERROR),
+           errmsg("unsupported data type %s is specified for a column of a "
+                  "table with magma format.",
+                  TypeNameToString(makeTypeNameFromOid(datatype, -1))),
+           errOmitLocation(true)));
+    }
+
+    // for numeric, precision must be set when creating the table
+    if (HAWQ_TYPE_NUMERIC == datatype)
+    {
+      // get type modifier
+      int4 tmp_typmod =
+        ((Form_pg_attribute) (tup_desc->attrs[i]))->atttypmod - VARHDRSZ;
+
+      // get precision and scale values
+      int precision = (tmp_typmod >> 16) & 0xffff;
+      int scale = tmp_typmod & 0xffff;
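+      // the numeric typmod stores ((precision << 16) | scale) + VARHDRSZ,
+      // so the shift and mask above recover the declared precision and scale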
+      if (precision < 1 || 38 < precision){
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+             errmsg("For Magma Format, DECIMAL precision must be between 1 and 38")));
+      }
+      if (scale == 0){
+        ereport(NOTICE,
+            (errmsg("Using a scale of zero for DECIMAL in Magma Format")));
+      }
+    }
+  }
+
+  PG_RETURN_VOID();
+}
+
+Datum magma_createindex(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  char *dbname = ps->ps_db_name;
+  char *schemaname = ps->ps_schema_name;
+  char *tablename = ps->ps_table_name;
+  MagmaIndex *magmaidx = &(ps->magma_idx);
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  elog(DEBUG1, "create index with index name: %s, index type: %s,"
+      " column count: %d, unique: %d, primary: %d",
+      magmaidx->indexName, magmaidx->indexType, magmaidx->colCount,
+      magmaidx->unique, magmaidx->primary);
+
+  /* create index in magma */
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service when creating index.");
+  }
+
+  int16_t tableType = 0;
+  MagmaClientC_SetupTableInfo(client, dbname, schemaname, tablename, tableType);
+  MagmaClientC_CreateIndex(client, snapshot, magmaidx);
+  magma_check_result(&client);
+  PG_RETURN_VOID();
+}
+
+Datum magma_dropindex(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  char *dbname = ps->ps_db_name;
+  char *schemaname = ps->ps_schema_name;
+  char *tablename = ps->ps_table_name;
+  char *indexname = ps->magma_idx.indexName;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  elog(DEBUG1, "drop index with index name: %s", indexname);
+
+  /* drop index in magma */
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service when dropping index.");
+  }
+
+  int16_t tableType = 0;
+  MagmaClientC_SetupTableInfo(client, dbname, schemaname, tablename, tableType);
+  MagmaClientC_DropIndex(client, snapshot, indexname);
+  magma_check_result(&client);
+  PG_RETURN_VOID();
+}
+
+Datum magma_reindex_index(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  char *dbname = ps->ps_db_name;
+  char *schemaname = ps->ps_schema_name;
+  char *tablename = ps->ps_table_name;
+  char *indexname = ps->magma_idx.indexName;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  elog(DEBUG1, "reindex index with index name: %s", indexname);
+
+  /* reindex index in magma */
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service when reindexing index.");
+  }
+
+  int16_t tableType = 0;
+  MagmaClientC_SetupTableInfo(client, dbname, schemaname, tablename, tableType);
+  MagmaClientC_Reindex(client, snapshot, indexname);
+  magma_check_result(&client);
+  PG_RETURN_VOID();
+}
+
+/*
+ * Implementations of accessors for magma format in pluggable storage
+ */
+
+/*
+ * void
+ * magma_createtable(char *dbname,
+ *                char *schemaname,
+ *                char *tablename,
+ *                List *tableelements,
+ *                IndexStmt *primarykey)
+ */
+Datum magma_createtable(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+
+  char *dbname = ps->ps_db_name;
+  char *schemaname = ps->ps_schema_name;
+  char *tablename = ps->ps_table_name;
+  char *fmtName = ps->ps_formatter_name;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+
+  List *tableelements = ps->ps_table_elements;
+  IndexStmt *primarykey = ps->ps_primary_key;
+  List *distributedkey = ps->ps_distributed_key;
+  // bool isexternal = ps->ps_is_external;
+  // List *locations = ps->ps_ext_locations;
+
+  /* get primary key */
+  List *pk_names = NIL;
+  // process one or more primary key columns.
+  if (primarykey != NULL) {
+    ListCell *lc;
+    foreach (lc, primarykey->indexParams) {
+      IndexElem *idx = (IndexElem *)lfirst(lc);
+      Assert(IsA(idx, IndexElem));
+
+      pk_names = lappend(pk_names, makeString(idx->name));
+    }
+  }
+  /* count number of keys and values of table */
+  MagmaColumn *cols = NULL;
+  int ncols = 0;
+
+  int nkeys = primarykey == NULL ? 0 : list_length(primarykey->indexParams);
+
+  Assert(nkeys == list_length(pk_names));
+
+  /* prepare keys and values for table creation */
+  cols =
+      (MagmaColumn *)palloc0(sizeof(MagmaColumn) * list_length(tableelements));
+  ListCell *element;
+
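+  /*
+   * Map the formatter name to the magma table type code: 0 for magmatp and
+   * 1 for magmaap (presumably transactional vs. analytical table layouts).
+   */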
+  int16_t tableType = 0;
+  if (pg_strncasecmp(fmtName, "magmatp", strlen("magmatp")) == 0) {
+    tableType = 0;
+  } else if (pg_strncasecmp(fmtName, "magmaap", strlen("magmaap")) == 0) {
+    tableType = 1;
+  } else {
+    elog(ERROR, "magma_createtable: failed to get table format type: [%s]",
+         fmtName);
+  }
+
+  foreach (element, tableelements) {
+    ColumnDef *col = (ColumnDef *)(lfirst(element));
+    MagmaColumn *dcol = NULL;
+    int pkpos = list_find(pk_names, makeString(col->colname));
+    int dkpos = list_find(distributedkey, makeString(col->colname));
+    dcol = &(cols[ncols]);
+    // TODO(xsheng): get default value from col->raw_default
+    dcol->defaultValue = "";
+    dcol->dropped = false;
+    dcol->primaryKeyIndex = pkpos;
+    dcol->distKeyIndex = dkpos;
+    // TODO(xsheng): sort key index is not implemented yet; add it later
+    dcol->sortKeyIndex = -1;
+    dcol->id = ncols;
+    dcol->name = pstrdup(col->colname);
+    dcol->datatype = LookupTypeName(NULL, col->typname);
+    dcol->rawTypeMod = col->typname->typmod;
+    Oid tmpOidVal = dcol->datatype;
+    dcol->datatype = map_hawq_type_to_magma_type(dcol->datatype, !((bool)tableType));
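+    /*
+     * The switch below fills the per-column scale fields: scale1 carries the
+     * type modifier for variable-length and decimal types, while scale2 keeps
+     * the original HAWQ type oid for STRUCTEXID/IOBASETYPEID columns.
+     */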
+    switch (dcol->datatype) {
+      case BOOLEANID:
+      case TINYINTID:
+      case SMALLINTID:
+      case INTID:
+      case BIGINTID:
+      case FLOATID:
+      case DOUBLEID:
+      case TIMESTAMPID:
+      case DATEID:
+      case TIMEID: {
+        dcol->scale1 = 0;
+        dcol->scale2 = 0;
+        dcol->isnull = false;
+      } break;
+
+      case JSONBID:
+      case JSONID:
+      case BINARYID:
+      case CHARID:
+      case VARCHARID:
+      case STRINGID:
+      case DECIMALID:
+      case DECIMALNEWID: {
+        dcol->scale1 = col->typname->typmod - VARHDRSZ;
+        dcol->scale2 = 0;
+        dcol->isnull = false;
+      } break;
+
+      case STRUCTEXID:
+      case IOBASETYPEID: {
+        dcol->scale1 = col->typname->typmod - VARHDRSZ;
+        dcol->scale2 = tmpOidVal;  // original oid
+        dcol->isnull = false;
+      } break;
+
+      case INVALIDTYPEID: {
+        elog(ERROR, "data type %s is invalid",
+             TypeNameToString(makeTypeNameFromOid(tmpOidVal, -1)));
+      } break;
+
+      default: {
+        elog(ERROR, "data type %s is not supported yet",
+             TypeNameToString(makeTypeNameFromOid(tmpOidVal, -1)));
+      } break;
+    }
+    ncols++;
+  }
+
+  assert(ncols == list_length(tableelements));
+  /* create table in magma */
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service");
+  }
+
+  MagmaClientC_SetupTableInfo(client, dbname, schemaname, tablename, tableType);
+  MagmaClientC_CreateTable(client, snapshot, ncols, cols);
+  magma_check_result(&client);
+  pfree(cols);
+  list_free(pk_names);
+  PG_RETURN_VOID();
+}
+
+/*
+ * void
+ * magma_droptable(char *dbname,
+ *              char *schemaname,
+ *              char *tablename)
+ */
+Datum magma_droptable(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+
+  // ExtTableEntry *ete = ps->ps_exttable;
+  char *dbname = ps->ps_db_name;
+  char *schemaname = ps->ps_schema_name;
+  char *tablename = ps->ps_table_name;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+
+  /* drop table in magma */
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service");
+  }
+  int16_t tableType = 0;
+  // tableType is not used when dropping a table; keep the default value
+  MagmaClientC_SetupTableInfo(client, dbname, schemaname, tablename, tableType);
+  MagmaClientC_DropTable(client, snapshot);
+  magma_check_result(&client);
+
+  PG_RETURN_VOID();
+}
+
+Datum magma_beginscan(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ExternalScan *ext_scan = ps->ps_ext_scan;
+  ScanState *scan_state = ps->ps_scan_state;
+  Relation relation = ps->ps_relation;
+  int formatterType = ps->ps_formatter_type;
+  char *formatterName = ps->ps_formatter_name;
+  char *serializeSchema = ps->ps_magma_serializeSchema;
+  int serializeSchemaLen = ps->ps_magma_serializeSchemaLen;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  Index scan_rel_id = ext_scan->scan.scanrelid;
+  uint32 scan_counter = ext_scan->scancounter;
+  List *uri_list = ext_scan->uriList;
+  List *fmt_opts = ext_scan->fmtOpts;
+  int fmt_encoding = ext_scan->encoding;
+  Plan *scan_plan = &(ext_scan->scan.plan);
+
+  /* Increment relation reference count while scanning relation */
+  /*
+   * This is just to make really sure the relcache entry won't go away while
+   * the scan has a pointer to it.  Caller should be holding the rel open
+   * anyway, so this is redundant in all normal scenarios...
+   */
+  RelationIncrementReferenceCount(relation);
+
+  /* Allocate and initialize the select descriptor */
+  FileScanDesc file_scan_desc = palloc0(sizeof(FileScanDescData));
+  file_scan_desc->fs_inited = false;
+  file_scan_desc->fs_ctup.t_data = NULL;
+  ItemPointerSetInvalid(&file_scan_desc->fs_ctup.t_self);
+  file_scan_desc->fs_cbuf = InvalidBuffer;
+  file_scan_desc->fs_rd = relation;
+  file_scan_desc->fs_scanrelid = scan_rel_id;
+  file_scan_desc->fs_scancounter = scan_counter;
+  file_scan_desc->fs_scanquals = scan_plan->qual;
+  file_scan_desc->fs_noop = false;
+  file_scan_desc->fs_file = NULL;
+  file_scan_desc->fs_formatter = NULL;
+  file_scan_desc->fs_formatter_type = formatterType;
+  file_scan_desc->fs_formatter_name = formatterName;
+  file_scan_desc->fs_serializeSchema =
+      pnstrdup(serializeSchema, serializeSchemaLen);
+  file_scan_desc->fs_serializeSchemaLen = serializeSchemaLen;
+
+  /* Setup scan functions */
+  get_magma_scan_functions(formatterName, file_scan_desc);
+
+  /* Get URI for the scan */
+  /*
+   * get the external URI assigned to us.
+   *
+   * The URI assigned for this segment is normally in the uriList list
+   * at the index of this segment id. However, if we are executing on
+   * MASTER ONLY the (one and only) entry which is destined for the master
+   * will be at the first entry of the uriList list.
+   */
+  char *uri_str = NULL;
+  int segindex = GetQEIndex();
+
+  Value *v = NULL;
+
+  v = (Value *)list_nth(uri_list, 0);
+  if (v->type == T_Null)
+    uri_str = NULL;
+  else
+    uri_str = (char *)strVal(v);
+
+  /*
+   * If a uri is assigned to us - get a reference to it. Some executors
+   * don't have a uri to scan (if # of uri's < # of primary segdbs),
+   * in which case uri will be NULL. If that's the case for this
+   * segdb, set it to no-op.
+   */
+  if (uri_str) {
+    /* set external source (uri) */
+    file_scan_desc->fs_uri = uri_str;
+    elog(DEBUG3, "fs_uri (%d) is set as %s", segindex, uri_str);
+    /* NOTE: we delay actually opening the data source until external_getnext()
+     */
+  } else {
+    /* segdb has no work to do. set to no-op */
+    file_scan_desc->fs_noop = true;
+    file_scan_desc->fs_uri = NULL;
+  }
+
+  /* Allocate values and nulls structure */
+  TupleDesc tup_desc = RelationGetDescr(relation);
+  file_scan_desc->fs_tupDesc = tup_desc;
+  file_scan_desc->attr = tup_desc->attrs;
+  file_scan_desc->num_phys_attrs = tup_desc->natts;
+
+  file_scan_desc->values =
+      (Datum *)palloc0(file_scan_desc->num_phys_attrs * sizeof(Datum));
+  file_scan_desc->nulls =
+      (bool *)palloc0(file_scan_desc->num_phys_attrs * sizeof(bool));
+
+  /* Setup user data */
+  /* sliceId is not used here; the executor ensures this */
+  /* currentSliceId == ps->ps_scan_state->ps.state->currentSliceIdInPlan */
+  if (AmISegment()) {
+    /* Initialize user data */
+    GlobalFormatUserData *user_data = palloc0(sizeof(GlobalFormatUserData));
+    if (formatterName != NULL &&
+        (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
+      user_data->isMagmatp = true;
+    } else {
+      user_data->isMagmatp = false;
+    }
+
+    // special handling for magmatp decimal
+    if (user_data->isMagmatp) {
+      file_scan_desc->in_functions = (FmgrInfo *)palloc0(
+          file_scan_desc->num_phys_attrs * sizeof(FmgrInfo));
+      file_scan_desc->typioparams =
+          (Oid *)palloc0(file_scan_desc->num_phys_attrs * sizeof(Oid));
+      bool hasNumeric = false;
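+      // look up the text-input function for each numeric column up front so
+      // it can be invoked via InputFunctionCall() when decoding magmatp
+      // decimals (see the note below)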
+      for (int i = 0; i < file_scan_desc->num_phys_attrs; ++i) {
+        if (file_scan_desc->attr[i]->atttypid != HAWQ_TYPE_NUMERIC) continue;
+        hasNumeric = true;
+        getTypeInputInfo(file_scan_desc->attr[i]->atttypid,
+                         &file_scan_desc->in_func_oid,
+                         &file_scan_desc->typioparams[i]);
+        fmgr_info(file_scan_desc->in_func_oid,
+                  &file_scan_desc->in_functions[i]);
+      }
+      /*
+       * A magmatp table supports the numeric type with the old decimal
+       * format, so numeric input functions are called to read its numeric
+       * columns. To prevent running out of memory, InputFunctionCall()
+       * should be wrapped by the per-row context. A magmaap table uses the
+       * new decimal format, so the extra MemoryContext is unnecessary there.
+       */
+      if (hasNumeric) {
+        file_scan_desc->fs_pstate = (CopyStateData *)palloc0(sizeof(CopyStateData));
+        CopyState pstate = file_scan_desc->fs_pstate;
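+        /*
+         * This CopyState is only a minimal container here: it mainly supplies
+         * the per-row memory context (rowcontext) and bookkeeping fields used
+         * while converting numeric values; it is presumably not used for
+         * actual COPY parsing.
+         */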
+        pstate->fe_eof = false;
+        pstate->eol_type = EOL_UNKNOWN;
+        pstate->eol_str = NULL;
+        pstate->cur_relname = RelationGetRelationName(relation);
+        pstate->cur_lineno = 0;
+        pstate->err_loc_type = ROWNUM_ORIGINAL;
+        pstate->cur_attname = NULL;
+        pstate->raw_buf_done = true; /* true so we will read data in first run */
+        pstate->line_done = true;
+        pstate->bytesread = 0;
+        pstate->custom = false;
+        pstate->header_line = false;
+        pstate->fill_missing = false;
+        pstate->line_buf_converted = false;
+        pstate->raw_buf_index = 0;
+        pstate->processed = 0;
+        pstate->filename = uri_str;
+        pstate->copy_dest = COPY_EXTERNAL_SOURCE;
+        pstate->missing_bytes = 0;
+        pstate->csv_mode = false;
+        pstate->custom = true;
+        pstate->custom_formatter_func = NULL;
+        pstate->custom_formatter_name = NULL;
+        pstate->custom_formatter_params = NIL;
+        pstate->rel = relation;
+        pstate->client_encoding = PG_UTF8;
+        pstate->enc_conversion_proc = NULL;
+        pstate->need_transcoding = false;
+        pstate->encoding_embeds_ascii =
+            PG_ENCODING_IS_CLIENT_ONLY(pstate->client_encoding);
+        pstate->attr_offsets = NULL;
+        pstate->attnumlist = NULL;
+        pstate->force_quote = NIL;
+        pstate->force_quote_flags = NULL;
+        pstate->force_notnull = NIL;
+        pstate->force_notnull_flags = NULL;
+        initStringInfo(&pstate->attribute_buf);
+        initStringInfo(&pstate->line_buf);
+        MemSet(pstate->raw_buf, ' ', RAW_BUF_SIZE * sizeof(char));
+        pstate->raw_buf[RAW_BUF_SIZE] = '\0';
+        /*
+         * Create a temporary memory context that we can reset once per row to
+         * recover palloc'd memory. This avoids any problems with leaks inside
+         * datatype input or output routines, and should be faster than retail
+         * pfree's anyway.
+         */
+        pstate->rowcontext = AllocSetContextCreate(
+            CurrentMemoryContext, "ExtTableMemCxt", ALLOCSET_DEFAULT_MINSIZE,
+                ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE);
+      }
+    } else {
+      file_scan_desc->in_functions = NULL;
+      file_scan_desc->typioparams = NULL;
+    }
+
+    /* the number of ranges is dynamic for magma table */
+    int32_t nRanges = 0;
+    ListCell *lc_split = NULL;
+    foreach (lc_split, ps->ps_magma_splits) {
+      List *split = (List *)lfirst(lc_split);
+      nRanges += list_length(split);
+    }
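+    /* nRanges now holds the total number of splits across all range lists;
+     * it sizes the magma formatter instance created below. */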
+
+    init_magma_format_user_data_for_read(tup_desc, user_data);
+
+    /* Create formatter instance */
+    // ExtTableEntry *ete = GetExtTableEntry(RelationGetRelid(relation));
+    user_data->fmt = create_magma_formatter_instance(
+        NIL, serializeSchema, serializeSchemaLen, PG_UTF8, formatterName,
+        nRanges);
+
+    /* Prepare database, schema, and table information */
+    char *dbname = database;
+    char *schemaname = getNamespaceNameByOid(RelationGetNamespace(relation));
+    Assert(schemaname != NULL);
+    char *tablename = RelationGetRelationName(relation);
+
+    MagmaFormatC_SetupTarget(user_data->fmt, dbname, schemaname, tablename);
+    MagmaFormatC_SetupTupDesc(user_data->fmt, user_data->numberOfColumns,
+                              user_data->colNames, user_data->colDatatypes,
+                              user_data->colDatatypeMods,
+                              user_data->colIsNulls);
+
+    /* Build tuple description */
+    Plan *plan = &(ext_scan->scan.plan);
+    file_scan_desc->fs_ps_plan = plan;
+    build_magma_tuple_descrition_for_read(plan, relation, user_data, ps->ps_magma_skip_tid);
+
+    /* prepare plan */
+    CommonPlanContext ctx;
+    init_common_plan_context(&ctx);
+    scan_plan->plan_parent_node_id = -1;
+    convert_extscan_to_common_plan(scan_plan, scan_state->splits, relation,
+                                   &ctx);
+    // elog(DEBUG1, "common plan: %s",
+    // univPlanGetJsonFormatedPlan(ctx.univplan));
+
+    int32_t size = 0;
+    char *planstr = univPlanSerialize(ctx.univplan, &size, false);
+    /* Save user data */
+    file_scan_desc->fs_ps_user_data = (void *)user_data;
+
+    /* Begin scan with the formatter */
+    bool enableShm = (strcasecmp(magma_enable_shm, "ON") == 0);
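+    /* magma_shm_limit_per_block is presumably configured in KB and converted
+     * to bytes here (assumption). */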
+    MagmaFormatBeginScanMagmaFormatC(
+        user_data->fmt, user_data->colToReads, snapshot, planstr, size,
+        enableShm, ps->ps_magma_skip_tid, magma_shm_limit_per_block * 1024);
+    MagmaFormatCatchedError *e =
+        MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+    if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+      elog(ERROR, "magma_scan: failed to begin scan: %s(%d)", e->errMessage,
+           e->errCode);
+    }
+
+    free_common_plan_context(&ctx);
+  }
+
+  /* Save file_scan_desc */
+  ps->ps_file_scan_desc = file_scan_desc;
+
+  PG_RETURN_POINTER(file_scan_desc);
+}
+
+void init_common_plan_context(CommonPlanContext *ctx) {
+  ctx->univplan = univPlanNewInstance();
+  ctx->convertible = true;
+  ctx->base.node = NULL;
+  ctx->querySelect = false;
+  ctx->isMagma = true;
+  ctx->stmt = NULL;
+  ctx->setDummyTListRef = false;
+  ctx->scanReadStatsOnly = false;
+  ctx->parent = NULL;
+  ctx->exprBufStack = NIL;
+}
+
+void free_common_plan_context(CommonPlanContext *ctx) {
+  univPlanFreeInstance(&ctx->univplan);
+}
+/*
+ * ExternalSelectDesc
+ * magma_getnext_init(PlanState *planState,
+ *                 ExternalScanState *extScanState)
+ */
+Datum magma_getnext_init(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  // PlanState *plan_state = ps->ps_plan_state;
+  // ExternalScanState *ext_scan_state = ps->ps_ext_scan_state;
+
+  ExternalSelectDesc ext_select_desc = NULL;
+  /*
+  ExternalSelectDesc ext_select_desc = (ExternalSelectDesc)palloc0(
+                  sizeof(ExternalSelectDescData));
+
+  Plan *rootPlan = NULL;
+
+  if (plan_state != NULL)
+  {
+          ext_select_desc->projInfo = plan_state->ps_ProjInfo;
+
+          // If we have an agg type then our parent is an Agg node
+          rootPlan = plan_state->state->es_plannedstmt->planTree;
+          if (IsA(rootPlan, Agg) && ext_scan_state->parent_agg_type)
+          {
+                  ext_select_desc->agg_type = ext_scan_state->parent_agg_type;
+          }
+  }
+  */
+
+  ps->ps_ext_select_desc = ext_select_desc;
+
+  PG_RETURN_POINTER(ext_select_desc);
+}
+
+Datum magma_getnext(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  FileScanDesc fsd = ps->ps_file_scan_desc;
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+  TupleTableSlot *slot = ps->ps_tuple_table_slot;
+  bool *nulls = slot_get_isnull(slot);
+  memset(nulls, true, user_data->numberOfColumns);
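+  /* Mark every column as null up front; the formatter presumably clears the
+   * flag only for the columns it actually returns. */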
+
+  bool res = MagmaFormatNextMagmaFormatC(
+      user_data->fmt, user_data->colRawValues, user_data->colValLength, nulls,
+      &(user_data->colRawTid));
+  if (res) {
+    MemoryContext old_context = NULL;
+    if (user_data->isMagmatp && fsd->fs_pstate != NULL &&
+        fsd->fs_pstate->rowcontext != NULL) {
+      /* Free memory for previous tuple if necessary */
+      MemoryContextReset(fsd->fs_pstate->rowcontext);
+      old_context = MemoryContextSwitchTo(fsd->fs_pstate->rowcontext);
+    }
+
+    for (int32_t i = 0; i < user_data->numberOfColumns; ++i) {
+      // Column is not read, or its value is null
+      if (nulls[i]) continue;
+
+      switch (fsd->attr[i]->atttypid) {
+        case HAWQ_TYPE_BOOL: {
+          user_data->colValues[i] =
+              BoolGetDatum(*(bool *)(user_data->colRawValues[i]));
+          break;
+        }
+        case HAWQ_TYPE_INT2: {
+          user_data->colValues[i] =
+              Int16GetDatum(*(int16_t *)(user_data->colRawValues[i]));
+          break;
+        }
+        case HAWQ_TYPE_INT4: {
+          user_data->colValues[i] =
+              Int32GetDatum(*(int32_t *)(user_data->colRawValues[i]));
+          break;
+        }
+        case HAWQ_TYPE_INT8:
+        case HAWQ_TYPE_TIME:
+        case HAWQ_TYPE_TIMESTAMP:
+        case HAWQ_TYPE_TIMESTAMPTZ: {
+          user_data->colValues[i] =
+              Int64GetDatum(*(int64_t *)(user_data->colRawValues[i]));
+          break;
+        }
+        case HAWQ_TYPE_FLOAT4: {
+          user_data->colValues[i] =
+              Float4GetDatum(*(float *)(user_data->colRawValues[i]));
+          break;
+        }
+        case HAWQ_TYPE_FLOAT8: {
+          user_data->colValues[i] =
+              Float8GetDatum(*(double *)(user_data->colRawValues[i]));
+          break;
+        }
+        case HAWQ_TYPE_JSONB:
+        case HAWQ_TYPE_JSON:
+        case HAWQ_TYPE_VARCHAR:
+        case HAWQ_TYPE_TEXT:
+        case HAWQ_TYPE_BPCHAR:
+        case HAWQ_TYPE_BYTE: {
+          SET_VARSIZE((struct varlena *)(user_data->colRawValues[i]),
+                      user_data->colValLength[i]);
+          user_data->colValues[i] = PointerGetDatum(user_data->colRawValues[i]);
+          break;
+        }
+        case HAWQ_TYPE_NUMERIC: {
+          SET_VARSIZE((struct varlena *)(user_data->colRawValues[i]),
+                      user_data->colValLength[i]);
+          user_data->colValues[i] =
+              PointerGetDatum(user_data->colRawValues[i]);
+          break;
+        }
+        case HAWQ_TYPE_DATE: {
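+          // magma appears to return dates as days since the Unix epoch
+          // (1970-01-01); shift them to PostgreSQL's 2000-01-01-based DateADT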
+          user_data->colValues[i] =
+              Int32GetDatum(*(int32_t *)(user_data->colRawValues[i]) -
+                            POSTGRES_EPOCH_JDATE + UNIX_EPOCH_JDATE);
+          break;
+        }
+        default: {
+          ereport(ERROR, (errmsg_internal("MAGMA: unsupported data type %d",
+                                          fsd->attr[i]->atttypid)));
+          break;
+        }
+      }
+    }
+    if (user_data->isMagmatp && fsd->fs_pstate != NULL &&
+        fsd->fs_pstate->rowcontext != NULL) {
+      MemoryContextSwitchTo(old_context);
+    }
+
+    if (user_data->colRawTid != NULL) {
+      user_data->colTid = *(MagmaTidC *)(user_data->colRawTid);
+      ConvertTidToCtidAndRangeid(user_data->colTid,
+                                 &(slot->PRIVATE_tts_synthetic_ctid),
+                                 &(slot->tts_rangeid));
+    }
+
+    ps->ps_has_tuple = true;
+    slot->PRIVATE_tts_values = user_data->colValues;
+    TupSetVirtualTupleNValid(slot, user_data->numberOfColumns);
+    PG_RETURN_BOOL(true);
+  }
+
+  magma_clear(ps, true);
+
+  PG_RETURN_BOOL(false);
+}
+
+/*
+ * void
+ * magma_rescan(FileScanDesc scan)
+ */
+Datum magma_rescan(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  FileScanDesc fsd = ps->ps_file_scan_desc;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+
+  if (user_data == NULL) {
+    /* 1 Initialize user data */
+    user_data = palloc0(sizeof(GlobalFormatUserData));
+
+    if (fsd->fs_formatter_name != NULL &&
+        (strncasecmp(fsd->fs_formatter_name, "magmatp",
+                     sizeof("magmatp") - 1) == 0)) {
+      user_data->isMagmatp = true;
+    } else {
+      user_data->isMagmatp = false;
+    }
+
+    init_magma_format_user_data_for_read(fsd->fs_tupDesc, user_data);
+
+    Relation rel = fsd->fs_rd;
+    ExtTableEntry *ete = GetExtTableEntry(RelationGetRelid(rel));
+
+    int formatterType = ExternalTableType_Invalid;
+
+    char *formatterName = NULL;
+    getExternalTableTypeStr(ete->fmtcode, ete->fmtopts, &formatterType,
+                            &formatterName);
+
+    bool isexternal = false;
+    char *serializeSchema = fsd->fs_serializeSchema;
+    int serializeSchemaLen = fsd->fs_serializeSchemaLen;
+    get_magma_category_info(ete->fmtopts, &isexternal);
+
+    user_data->fmt = create_magma_formatter_instance(
+        NIL, serializeSchema, serializeSchemaLen, PG_UTF8, formatterName, 0);
+
+    /* 4 Build tuple description */
+    Plan *plan = fsd->fs_ps_plan;
+    build_magma_tuple_descrition_for_read(plan, fsd->fs_rd, user_data, ps->ps_magma_skip_tid);
+
+    /* 4.1 Build plan */
+    if (AmISegment() &&
+        currentSliceId == ps->ps_scan_state->ps.state->currentSliceIdInPlan) {
+      CommonPlanContext ctx;
+      init_common_plan_context(&ctx);
+      plan->plan_parent_node_id = -1;
+      convert_extscan_to_common_plan(plan, fsd->fs_ps_scan_state->splits,
+                                     fsd->fs_rd, &ctx);
+      int32_t size = 0;
+      char *planstr = univPlanSerialize(ctx.univplan, &size, false);
+
+      /* 5 Save user data */
+      fsd->fs_ps_user_data = (void *)user_data;
+
+      /* 6 Begin scan with the formatter */
+      if (currentSliceId == ps->ps_scan_state->ps.state->currentSliceIdInPlan) {
+        bool enableShm = (strcasecmp(magma_enable_shm, "ON") == 0);
+        MagmaFormatBeginScanMagmaFormatC(user_data->fmt, user_data->colToReads,
+                                         snapshot, planstr, size,
+                                         enableShm, ps->ps_magma_skip_tid,
+                                         magma_shm_limit_per_block * 1024);
+        MagmaFormatCatchedError *e =
+            MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+
+        if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+          elog(ERROR, "magma_scan: failed to begin scan: %s(%d)", e->errMessage,
+               e->errCode);
+        }
+      }
+
+      free_common_plan_context(&ctx);
+    }
+  }
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * void
+ * magma_endscan(FileScanDesc scan)
+ */
+Datum magma_endscan(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  FileScanDesc fsd = ps->ps_file_scan_desc;
+
+  GlobalFormatUserData *user_data = (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+
+  // free memory in endscan, for some subquery scenarios "getnext" might not be called
+  if (user_data != NULL) {
+    magma_clear(ps, false);
+  }
+
+  if (fsd->values) {
+    // decrement relation reference count and free scan descriptor storage
+    RelationDecrementReferenceCount(fsd->fs_rd);
+
+    pfree(fsd->values);
+    fsd->values = NULL;
+  }
+
+  if (fsd->nulls) {
+    pfree(fsd->nulls);
+    fsd->nulls = NULL;
+  }
+
+  // free formatter information
+  if (fsd->fs_formatter_name) {
+    pfree(fsd->fs_formatter_name);
+    fsd->fs_formatter_name = NULL;
+  }
+
+  if (fsd->in_functions) {
+    pfree(fsd->in_functions);
+    fsd->in_functions = NULL;
+  }
+
+  if (fsd->typioparams) {
+    pfree(fsd->typioparams);
+    fsd->typioparams = NULL;
+  }
+
+  if (fsd->fs_pstate != NULL && fsd->fs_pstate->rowcontext != NULL) {
+    /*
+     * delete the row context
+     */
+    MemoryContextDelete(fsd->fs_pstate->rowcontext);
+    fsd->fs_pstate->rowcontext = NULL;
+  }
+
+  /*
+   * free parse state memory
+   */
+  if (fsd->fs_pstate != NULL) {
+    if (fsd->fs_pstate->attribute_buf.data)
+      pfree(fsd->fs_pstate->attribute_buf.data);
+    if (fsd->fs_pstate->line_buf.data) pfree(fsd->fs_pstate->line_buf.data);
+
+    pfree(fsd->fs_pstate);
+    fsd->fs_pstate = NULL;
+  }
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * void
+ * magma_stopscan(FileScanDesc scan)
+ */
+Datum magma_stopscan(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  FileScanDesc fsd = ps->ps_file_scan_desc;
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+  TupleTableSlot *tts = ps->ps_tuple_table_slot;
+
+  if (!user_data) PG_RETURN_VOID();
+
+  MagmaFormatStopScanMagmaFormatC(user_data->fmt);
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode == ERRCODE_SUCCESSFUL_COMPLETION) {
+    MagmaFormatEndScanMagmaFormatC(user_data->fmt);
+    e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+    if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+      elog(ERROR, "Magma: failed to finish scan: %s (%d)", e->errMessage,
+           e->errCode);
+    }
+
+    MagmaFormatFreeMagmaFormatC(&(user_data->fmt));
+
+    pfree(user_data->colRawValues);
+    pfree(user_data->colValues);
+    pfree(user_data->colToReads);
+    pfree(user_data->colValLength);
+    for (int i = 0; i < user_data->numberOfColumns; ++i)
+      pfree(user_data->colNames[i]);
+    pfree(user_data->colNames);
+    pfree(user_data->colDatatypes);
+    pfree(user_data->colDatatypeMods);
+    pfree(user_data->colIsNulls);
+    pfree(user_data);
+    fsd->fs_ps_user_data = NULL;
+
+    /* form empty tuple */
+    ps->ps_has_tuple = false;
+
+    tts->PRIVATE_tts_values = NULL;
+    tts->PRIVATE_tts_isnull = NULL;
+    ExecClearTuple(tts);
+  } else {
+    elog(ERROR, "magma_stopscan: failed to stop scan: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+
+  PG_RETURN_VOID();
+}
+
+/* ExternalInsertDesc
+ * magma_begindelete(Relation relation)
+ */
+Datum magma_begindelete(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  Relation relation = ps->ps_relation;
+  char *serializeSchema = ps->ps_magma_serializeSchema;
+  int serializeSchemaLen = ps->ps_magma_serializeSchemaLen;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  /* 1. Allocate and initialize the delete descriptor */
+  ExternalInsertDesc edd = palloc0(sizeof(ExternalInsertDescData));
+
+  ExtTableEntry *ete = GetExtTableEntry(RelationGetRelid(relation));
+
+  int formatterType = ExternalTableType_Invalid;
+
+  char *formatterName = NULL;
+  getExternalTableTypeStr(ete->fmtcode, ete->fmtopts, &formatterType,
+                          &formatterName);
+
+  /* 1.1 Setup delete functions */
+  get_magma_delete_functions(formatterName, edd);
+
+  List *fmt_opts = NIL;
+  fmt_opts = lappend(fmt_opts, makeString(pstrdup(ete->fmtopts)));
+
+  /* 1.2 Allocate and initialize structure which track data parsing state */
+  edd->ext_pstate = (CopyStateData *)palloc0(sizeof(CopyStateData));
+  edd->ext_tupDesc = RelationGetDescr(relation);
+
+  /* 1.3 Initialize parse state */
+  /* 1.3.1 Initialize basic information for pstate */
+  CopyState pstate = edd->ext_pstate;
+
+  /* 1.3.2 Setup encoding information */
+  /*
+   * Set up encoding conversion info.  Even if the client and server
+   * encodings are the same, we must apply pg_client_to_server() to validate
+   * data in multibyte encodings.
+   *
+   * Each external table specifies the encoding of its external data. We will
+   * therefore set a client encoding and client-to-server conversion procedure
+   * in here (server-to-client in WET) and these will be used in the data
+   * conversion routines (in copy.c CopyReadLineXXX(), etc).
+   */
+  int fmt_encoding = ete->encoding;
+  Insist(PG_VALID_ENCODING(fmt_encoding));
+  pstate->client_encoding = fmt_encoding;
+  Oid conversion_proc =
+      FindDefaultConversionProc(GetDatabaseEncoding(), fmt_encoding);
+
+  if (OidIsValid(conversion_proc)) {
+    /* conversion proc found */
+    pstate->enc_conversion_proc = palloc0(sizeof(FmgrInfo));
+    fmgr_info(conversion_proc, pstate->enc_conversion_proc);
+  } else {
+    /* no conversion function (both encodings are probably the same) */
+    pstate->enc_conversion_proc = NULL;
+  }
+
+  pstate->need_transcoding = pstate->client_encoding != GetDatabaseEncoding();
+  pstate->encoding_embeds_ascii =
+      PG_ENCODING_IS_CLIENT_ONLY(pstate->client_encoding);
+
+  /* 1.3.3 Setup tuple description */
+  TupleDesc tup_desc = edd->ext_tupDesc;
+  pstate->attr_offsets = (int *)palloc0(tup_desc->natts * sizeof(int));
+
+  /* 1.3.4 Generate or convert list of attributes to process */
+  pstate->attnumlist = CopyGetAttnums(tup_desc, relation, NIL);
+
+  /* 1.3.5 Convert FORCE NOT NULL name list to per-column flags, check validity
+   */
+  pstate->force_notnull_flags = (bool *)palloc0(tup_desc->natts * sizeof(bool));
+  if (pstate->force_notnull) {
+    List *attnums;
+    ListCell *cur;
+
+    attnums = CopyGetAttnums(tup_desc, relation, pstate->force_notnull);
+
+    foreach (cur, attnums) {
+      int attnum = lfirst_int(cur);
+      pstate->force_notnull_flags[attnum - 1] = true;
+    }
+  }
+
+  /* 1.3.6 Take care of state that is WET specific */
+  Form_pg_attribute *attr = tup_desc->attrs;
+  ListCell *cur;
+
+  pstate->null_print_client = pstate->null_print; /* default */
+  pstate->fe_msgbuf = makeStringInfo(); /* use fe_msgbuf as a per-row buffer */
+  pstate->out_functions =
+      (FmgrInfo *)palloc0(tup_desc->natts * sizeof(FmgrInfo));
+
+  foreach (cur,
+           pstate->attnumlist) /* Get info about the columns we need to process */
+  {
+    int attnum = lfirst_int(cur);
+    Oid out_func_oid;
+    bool isvarlena;
+
+    getTypeOutputInfo(attr[attnum - 1]->atttypid, &out_func_oid, &isvarlena);
+    fmgr_info(out_func_oid, &pstate->out_functions[attnum - 1]);
+  }
+
+  /*
+   * We need to convert null_print to client encoding, because it
+   * will be sent directly with CopySendString.
+   */
+  if (pstate->need_transcoding) {
+    pstate->null_print_client = pg_server_to_custom(
+        pstate->null_print, pstate->null_print_len, pstate->client_encoding,
+        pstate->enc_conversion_proc);
+  }
+
+  /* 1.3.7 Create temporary memory context for per row process */
+  /*
+   * Create a temporary memory context that we can reset once per row to
+   * recover palloc'd memory.  This avoids any problems with leaks inside
+   * datatype input or output routines, and should be faster than retail
+   * pfree's anyway.
+   */
+  pstate->rowcontext = AllocSetContextCreate(
+      CurrentMemoryContext, "ExtTableMemCxt", ALLOCSET_DEFAULT_MINSIZE,
+      ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE);
+
+  /* 1.3.8 Parse format options */
+  char *format_str = pstrdup((char *)strVal(linitial(fmt_opts)));
+  char *fmt_name = NULL;
+  List *l = magma_parse_format_string(format_str, &fmt_name);
+  pstate->custom_formatter_name = fmt_name;
+  pstate->custom_formatter_params = l;
+  pfree(format_str);
+
+  /* 1.4 Initialize formatter data */
+  edd->ext_formatter_data = (FormatterData *)palloc0(sizeof(FormatterData));
+  edd->ext_formatter_data->fmt_perrow_ctx = edd->ext_pstate->rowcontext;
+
+  /* 2. Setup user data */
+  /* 2.1 Get database, schema, table name for the delete */
+  Assert(database != NULL);
+  Oid namespaceOid = RelationGetNamespace(relation);
+  char *schema = getNamespaceNameByOid(namespaceOid);
+  char *table = RelationGetRelationName(relation);
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)palloc0(sizeof(GlobalFormatUserData));
+
+  if (formatterName != NULL &&
+      (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
+    user_data->isMagmatp = true;
+  } else {
+    user_data->isMagmatp = false;
+  }
+
+  init_magma_format_user_data_for_write(tup_desc, user_data, relation);
+
+  /* the number of ranges is dynamic for magma table */
+  int32_t nRanges = 0;
+  ListCell *lc_split = NULL;
+  foreach (lc_split, ps->ps_magma_splits) {
+      List *split = (List *)lfirst(lc_split);
+      nRanges += list_length(split);
+  }
+
+  /* 2.2 Create formatter instance */
+  List *fmt_opts_defelem = pstate->custom_formatter_params;
+  user_data->fmt = create_magma_formatter_instance(
+      fmt_opts_defelem, serializeSchema, serializeSchemaLen, fmt_encoding,
+      formatterName, nRanges);
+  /* prepare hash info */
+  int32_t nDistKeyIndex = 0;
+  int16_t *distKeyIndex = NULL;
+  fetchDistributionPolicy(relation->rd_id, &nDistKeyIndex, &distKeyIndex);
+
+  uint32 range_to_rg_map[nRanges];
+  List *rg = magma_build_range_to_rg_map(ps->ps_magma_splits, range_to_rg_map);
+  int nRg = list_length(rg);
+  uint16 *rgId = palloc0(sizeof(uint16) * nRg);
+  char **rgUrl = palloc0(sizeof(char *) * nRg);
+  magma_build_rg_to_url_map(ps->ps_magma_splits, rg, rgId, rgUrl);
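+  /*
+   * range_to_rg_map, rgId, and rgUrl describe how ranges map to their groups
+   * and service URLs; together with the jump hash map below they are handed
+   * to the formatter's hasher so each row can be routed to its range.
+   */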
+
+  /* 2.3 Prepare database, schema, and table information */
+  MagmaFormatC_SetupTarget(user_data->fmt, database, schema, table);
+  MagmaFormatC_SetupTupDesc(user_data->fmt, user_data->numberOfColumns,
+                            user_data->colNames, user_data->colDatatypes,
+                            user_data->colDatatypeMods, user_data->colIsNulls);
+
+  int *jumpHashMap = get_jump_hash_map(nRanges);
+  MagmaFormatC_SetupHasher(user_data->fmt, nDistKeyIndex, distKeyIndex, nRanges,
+                           range_to_rg_map, nRg, rgId, rgUrl, jumpHashMap,
+                           JUMP_HASH_MAP_LENGTH);
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_delete: failed to begin delete: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+
+  /* 2.4 Save user data */
+  edd->ext_ps_user_data = (void *)user_data;
+
+  /* 3. Begin delete with the formatter */
+  MagmaFormatBeginDeleteMagmaFormatC(user_data->fmt, snapshot);
+  MagmaFormatCatchedError *e1 = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e1->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_delete: failed to begin delete: %s(%d)", e1->errMessage,
+         e1->errCode);
+  }
+
+  /* 4. Save the result */
+  ps->ps_ext_delete_desc = edd;
+
+  PG_RETURN_POINTER(edd);
+}
+
+/* void
+ * magma_delete(ExternalInsertDesc extDeleteDesc,
+ *           TupleTableSlot *tupTableSlot)
+ */
+Datum magma_delete(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ExternalInsertDesc edd = ps->ps_ext_delete_desc;
+  TupleTableSlot *tts = ps->ps_tuple_table_slot;
+
+  /* It may be memtuple, we need to transfer it to virtual tuple */
+  slot_getallattrs(tts);
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(edd->ext_ps_user_data);
+
+  user_data->colTid.rangeid = DatumGetUInt16(edd->ext_rangeId);
+  user_data->colTid.rowid = DatumGetUInt64(edd->ext_rowId);
+  user_data->colValues = slot_get_values(tts);
+  user_data->colIsNulls = slot_get_isnull(tts);
+
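+  /* When a column is NULL, a type-appropriate dummy placeholder from the set
+   * below is passed to the formatter instead of a real value. */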
+  static bool DUMMY_BOOL = true;
+  static int8_t DUMMY_INT8 = 0;
+  static int16_t DUMMY_INT16 = 0;
+  static int32_t DUMMY_INT32 = 0;
+  static int64_t DUMMY_INT64 = 0;
+  static float DUMMY_FLOAT = 0.0;
+  static double DUMMY_DOUBLE = 0.0;
+  static char DUMMY_TEXT[1] = "";
+  static TimestampType DUMMY_TIMESTAMP = {0, 0};
+
+  MemoryContext per_row_context = edd->ext_pstate->rowcontext;
+  MemoryContext old_context = MemoryContextSwitchTo(per_row_context);
+
+  /* Get column values */
+  user_data->colRawTid = (char *)(&(user_data->colTid));
+  for (int i = 0; i < user_data->numberOfColumns; ++i) {
+    int dataType = (int)(tts->tts_tupleDescriptor->attrs[i]->atttypid);
+
+    user_data->colRawValues[i] = NULL;
+
+    if (user_data->colIsNulls[i]) {
+      if (dataType == HAWQ_TYPE_CHAR) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT8);
+      } else if (dataType == HAWQ_TYPE_INT2) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT16);
+      } else if (dataType == HAWQ_TYPE_INT4) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT32);
+      } else if (dataType == HAWQ_TYPE_INT8) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT64);
+      } else if (dataType == HAWQ_TYPE_TEXT || dataType == HAWQ_TYPE_BYTE ||
+                 dataType == HAWQ_TYPE_BPCHAR ||
+                 dataType == HAWQ_TYPE_VARCHAR ||
+                 dataType == HAWQ_TYPE_NUMERIC) {
+        user_data->colRawValues[i] = (char *)(DUMMY_TEXT);
+      } else if (dataType == HAWQ_TYPE_FLOAT4) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_FLOAT);
+      } else if (dataType == HAWQ_TYPE_FLOAT8) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_DOUBLE);
+      } else if (dataType == HAWQ_TYPE_BOOL) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_BOOL);
+      } else if (dataType == HAWQ_TYPE_DATE) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT32);
+      } else if (dataType == HAWQ_TYPE_TIME) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT64);
+      } else if (dataType == HAWQ_TYPE_TIMESTAMP) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_TIMESTAMP);
+      } else if (STRUCTEXID == user_data->colDatatypes[i] ||
+                 IOBASETYPEID == user_data->colDatatypes[i]) {
+        user_data->colRawValues[i] = (char *)(DUMMY_TEXT);
+      } else if (dataType == HAWQ_TYPE_INVALID) {
+        elog(ERROR, "HAWQ data type %s is invalid", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      } else {
+        elog(ERROR, "HAWQ data type %s is not supported yet", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      }
+
+      continue;
+    }
+
+    if (dataType == HAWQ_TYPE_INT4 || dataType == HAWQ_TYPE_INT8 ||
+        dataType == HAWQ_TYPE_FLOAT4 || dataType == HAWQ_TYPE_FLOAT8 ||
+        dataType == HAWQ_TYPE_INT2 || dataType == HAWQ_TYPE_CHAR ||
+        dataType == HAWQ_TYPE_BOOL || dataType == HAWQ_TYPE_TIME) {
+      user_data->colRawValues[i] = (char *)(&(user_data->colValues[i]));
+    } else if (dataType == HAWQ_TYPE_DATE) {
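+      // convert PostgreSQL's 2000-01-01-based date back to days since the
+      // Unix epoch before handing it to magma (mirror of the read path)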
+      int *date = (int *)(&(user_data->colValues[i]));
+      *date += POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE;
+      user_data->colRawValues[i] = (char *)(&(user_data->colValues[i]));
+    } else if (dataType == HAWQ_TYPE_TIMESTAMP) {
+      int64_t *timestamp = (int64_t *) (&(user_data->colValues[i]));
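+      // PostgreSQL timestamps are microseconds since 2000-01-01; split them
+      // into seconds/nanoseconds since the Unix epoch and then adjust the
+      // negative-remainder cases for values before the epoch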
+      user_data->colTimestamp[i].second = *timestamp / 1000000
+          + (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * 60 * 60 * 24;
+      user_data->colTimestamp[i].nanosecond = *timestamp % 1000000 * 1000;
+      int64_t days = user_data->colTimestamp[i].second / 60 / 60 / 24;
+      if (user_data->colTimestamp[i].nanosecond < 0 &&
+          (days > POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE || days < 0))
+        user_data->colTimestamp[i].nanosecond += 1000000000;
+      if(user_data->colTimestamp[i].second < 0 && user_data->colTimestamp[i].nanosecond)
+        user_data->colTimestamp[i].second -= 1;
+      user_data->colRawValues[i] = (char *) (&(user_data->colTimestamp[i]));
+    } else if (dataType == HAWQ_TYPE_TEXT || dataType == HAWQ_TYPE_BYTE ||
+               dataType == HAWQ_TYPE_BPCHAR || dataType == HAWQ_TYPE_VARCHAR ||
+               dataType == HAWQ_TYPE_NUMERIC) {
+      user_data->colRawValues[i] = OutputFunctionCall(
+          &(edd->ext_pstate->out_functions[i]), user_data->colValues[i]);
+    } else if (STRUCTEXID == user_data->colDatatypes[i]) {
+      int32_t len = VARSIZE(user_data->colValues[i]);
+      if (len <= 0) {
+        elog(ERROR, "length of HAWQ base type (UDT) %s must be greater than 0",
+             TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      }
+
+      char *pVal = DatumGetPointer(user_data->colValues[i]);
+      user_data->colRawValues[i] = palloc0(VARHDRSZ + len);
+
+      //  set value: the first 4 bytes are the length, then the raw value
+      // SET_VARSIZE(  (struct varlena * )user_data->colRawValues[i], len);
+      *((int32 *)(user_data->colRawValues[i])) = len;
+      memcpy(user_data->colRawValues[i] + VARHDRSZ, pVal, len);
+    } else if (IOBASETYPEID == user_data->colDatatypes[i]) {
+      //  get the length of basetype
+      bool passbyval = tts->tts_tupleDescriptor->attrs[i]->attbyval;
+      int32_t orilen = (int32_t)(tts->tts_tupleDescriptor->attrs[i]->attlen);
+      int32_t len =
+          get_typlen_fast(dataType, passbyval, orilen, user_data->colValues[i]);
+
+      if (1 > len) {  //  invalid length
+        elog(ERROR,
+             "HAWQ composite type (UDT) %s got an invalid length: %d",
+             TypeNameToString(makeTypeNameFromOid(dataType, -1)), len);
+      }
+
+      if (passbyval) {
+        //  value store in Datum directly
+        char *val = (char *)(user_data->colValues[i]);
+        user_data->colRawValues[i] = palloc0(len);
+        memcpy(user_data->colRawValues[i], val, len);
+      } else {
+        //  value stored by pointer in Datum
+        char *val = DatumGetPointer(user_data->colValues[i]);
+        user_data->colRawValues[i] = palloc0(VARHDRSZ + len);
+
+        //  set value: the first 4 bytes are the length, then the raw value
+        // SET_VARSIZE(  (struct varlena * )user_data->colRawValues[i], len);
+        *((int32 *)(user_data->colRawValues[i])) = len;
+        memcpy(user_data->colRawValues[i] + VARHDRSZ, val, len);
+      }
+    } else if (dataType == HAWQ_TYPE_INVALID) {
+      elog(ERROR, "HAWQ data type %s is invalid", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+    } else {
+      elog(ERROR, "HAWQ data type %s is not supported yet", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+    }
+  }
+
+  /* Pass to formatter to output */
+  MagmaFormatDeleteMagmaFormatC(user_data->fmt, user_data->colRawTid,
+                                user_data->colRawValues, user_data->colIsNulls);
+
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_delete: failed to delete: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+
+  MemoryContextReset(per_row_context);
+  MemoryContextSwitchTo(old_context);
+
+  ps->ps_tuple_oid = InvalidOid;
+
+  PG_RETURN_VOID();
+}
+
+/* void
+ * magma_enddelete(ExternalInsertDesc extDeleteDesc)
+ */
+Datum magma_enddelete(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ExternalInsertDesc edd = ps->ps_ext_delete_desc;
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(edd->ext_ps_user_data);
+
+  MagmaFormatEndDeleteMagmaFormatC(user_data->fmt);
+
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_delete: failed to end delete: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+  MagmaFormatFreeMagmaFormatC(&(user_data->fmt));
+
+  for (int i = 0; i < user_data->numberOfColumns; ++i) {
+    pfree(user_data->colNames[i]);
+  }
+  pfree(user_data->colNames);
+  /*
+   * DO NOT pfree colValues and colIsNulls here since ExecutorEnd will call
+   * cleanup_slot to pfree slot->PRIVATE_tts_values and
+   * slot->PRIVATE_tts_isnull. Otherwise it will be freed 2 times.
+   *
+   * pfree(user_data->colValues);
+   * pfree(user_data->colIsNulls);
+   */
+  pfree(user_data->colDatatypes);
+  pfree(user_data->colRawValues);
+  pfree(user_data);
+
+  if (edd->ext_formatter_data) pfree(edd->ext_formatter_data);
+
+  if (edd->ext_pstate != NULL && edd->ext_pstate->rowcontext != NULL) {
+    /*
+     * delete the row context
+     */
+    MemoryContextDelete(edd->ext_pstate->rowcontext);
+    edd->ext_pstate->rowcontext = NULL;
+  }
+
+  pfree(edd);
+
+  PG_RETURN_VOID();
+}
+
+/* ExternalInsertDesc
+ * magma_beginupdate(Relation relation)
+ */
+Datum magma_beginupdate(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  Relation relation = ps->ps_relation;
+  char *serializeSchema = ps->ps_magma_serializeSchema;
+  int serializeSchemaLen = ps->ps_magma_serializeSchemaLen;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  /* 1. Allocate and initialize the update descriptor */
+  ExternalInsertDesc eud = palloc0(sizeof(ExternalInsertDescData));
+
+  ExtTableEntry *ete = GetExtTableEntry(RelationGetRelid(relation));
+
+  int formatterType = ExternalTableType_Invalid;
+
+  char *formatterName = NULL;
+  getExternalTableTypeStr(ete->fmtcode, ete->fmtopts, &formatterType,
+                          &formatterName);
+
+  /* 1.1 Setup update functions */
+  get_magma_update_functions(formatterName, eud);
+
+  List *fmt_opts = NIL;
+  fmt_opts = lappend(fmt_opts, makeString(pstrdup(ete->fmtopts)));
+
+  /* 1.2 Allocate and initialize structure which track data parsing state */
+  eud->ext_pstate = (CopyStateData *)palloc0(sizeof(CopyStateData));
+  eud->ext_tupDesc = RelationGetDescr(relation);
+
+  /* 1.3 Initialize parse state */
+  /* 1.3.1 Initialize basic information for pstate */
+  CopyState pstate = eud->ext_pstate;
+
+  /* 1.3.2 Setup encoding information */
+  /*
+   * Set up encoding conversion info.  Even if the client and server
+   * encodings are the same, we must apply pg_client_to_server() to validate
+   * data in multibyte encodings.
+   *
+   * Each external table specifies the encoding of its external data. We will
+   * therefore set a client encoding and client-to-server conversion procedure
+   * in here (server-to-client in WET) and these will be used in the data
+   * conversion routines (in copy.c CopyReadLineXXX(), etc).
+   */
+  int fmt_encoding = ete->encoding;
+  Insist(PG_VALID_ENCODING(fmt_encoding));
+  pstate->client_encoding = fmt_encoding;
+  Oid conversion_proc =
+      FindDefaultConversionProc(GetDatabaseEncoding(), fmt_encoding);
+
+  if (OidIsValid(conversion_proc)) {
+    /* conversion proc found */
+    pstate->enc_conversion_proc = palloc0(sizeof(FmgrInfo));
+    fmgr_info(conversion_proc, pstate->enc_conversion_proc);
+  } else {
+    /* no conversion function (both encodings are probably the same) */
+    pstate->enc_conversion_proc = NULL;
+  }
+
+  pstate->need_transcoding = pstate->client_encoding != GetDatabaseEncoding();
+  pstate->encoding_embeds_ascii =
+      PG_ENCODING_IS_CLIENT_ONLY(pstate->client_encoding);
+
+  /* 1.3.3 Setup tuple description */
+  TupleDesc tup_desc = eud->ext_tupDesc;
+  pstate->attr_offsets = (int *)palloc0(tup_desc->natts * sizeof(int));
+
+  /* 1.3.4 Generate or convert list of attributes to process */
+  pstate->attnumlist = CopyGetAttnums(tup_desc, relation, NIL);
+
+  /* 1.3.5 Convert FORCE NOT NULL name list to per-column flags, check validity
+   */
+  pstate->force_notnull_flags = (bool *)palloc0(tup_desc->natts * sizeof(bool));
+  if (pstate->force_notnull) {
+    List *attnums;
+    ListCell *cur;
+
+    attnums = CopyGetAttnums(tup_desc, relation, pstate->force_notnull);
+
+    foreach (cur, attnums) {
+      int attnum = lfirst_int(cur);
+      pstate->force_notnull_flags[attnum - 1] = true;
+    }
+  }
+
+  /* 1.3.6 Take care of state that is WET specific */
+  Form_pg_attribute *attr = tup_desc->attrs;
+  ListCell *cur;
+
+  pstate->null_print_client = pstate->null_print; /* default */
+  pstate->fe_msgbuf = makeStringInfo(); /* use fe_msgbuf as a per-row buffer */
+  pstate->out_functions =
+      (FmgrInfo *)palloc0(tup_desc->natts * sizeof(FmgrInfo));
+
+  foreach (cur,
+           pstate->attnumlist) /* Get info about the columns we need to process */
+  {
+    int attnum = lfirst_int(cur);
+    Oid out_func_oid;
+    bool isvarlena;
+
+    getTypeOutputInfo(attr[attnum - 1]->atttypid, &out_func_oid, &isvarlena);
+    fmgr_info(out_func_oid, &pstate->out_functions[attnum - 1]);
+  }
+
+  /*
+   * We need to convert null_print to client encoding, because it
+   * will be sent directly with CopySendString.
+   */
+  if (pstate->need_transcoding) {
+    pstate->null_print_client = pg_server_to_custom(
+        pstate->null_print, pstate->null_print_len, pstate->client_encoding,
+        pstate->enc_conversion_proc);
+  }
+
+  /* 1.3.7 Create temporary memory context for per row process */
+  /*
+   * Create a temporary memory context that we can reset once per row to
+   * recover palloc'd memory.  This avoids any problems with leaks inside
+   * datatype input or output routines, and should be faster than retail
+   * pfree's anyway.
+   */
+  pstate->rowcontext = AllocSetContextCreate(
+      CurrentMemoryContext, "ExtTableMemCxt", ALLOCSET_DEFAULT_MINSIZE,
+      ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE);
+
+  /* 1.3.8 Parse format options */
+  char *format_str = pstrdup((char *)strVal(linitial(fmt_opts)));
+  char *fmt_name = NULL;
+  List *l = magma_parse_format_string(format_str, &fmt_name);
+  pstate->custom_formatter_name = fmt_name;
+  pstate->custom_formatter_params = l;
+  pfree(format_str);
+
+  /* 1.4 Initialize formatter data */
+  eud->ext_formatter_data = (FormatterData *)palloc0(sizeof(FormatterData));
+  eud->ext_formatter_data->fmt_perrow_ctx = eud->ext_pstate->rowcontext;
+
+  /* 2. Setup user data */
+
+  /* 2.1 Get database, schema, table name for the update */
+  Assert(database != NULL);
+  Oid namespaceOid = RelationGetNamespace(relation);
+  char *schema = getNamespaceNameByOid(namespaceOid);
+  char *table = RelationGetRelationName(relation);
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)palloc0(sizeof(GlobalFormatUserData));
+
+  if (formatterName != NULL &&
+      (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
+    user_data->isMagmatp = true;
+  } else {
+    user_data->isMagmatp = false;
+  }
+
+  init_magma_format_user_data_for_write(tup_desc, user_data, relation);
+
+  /* the number of ranges is dynamic for a magma table */
+  int32_t nRanges = 0;
+  ListCell *lc_split = NULL;
+  foreach (lc_split, ps->ps_magma_splits) {
+      List *split = (List *)lfirst(lc_split);
+      nRanges += list_length(split);
+  }
+
+  /* 2.2 Create formatter instance */
+  bool isexternal = false;
+  get_magma_category_info(ete->fmtopts, &isexternal);
+
+  List *fmt_opts_defelem = pstate->custom_formatter_params;
+  user_data->fmt = create_magma_formatter_instance(
+      fmt_opts_defelem, serializeSchema, serializeSchemaLen, fmt_encoding,
+      formatterName, nRanges);
+
+  /* prepare hash info */
+  int32_t nDistKeyIndex = 0;
+  int16_t *distKeyIndex = NULL;
+  fetchDistributionPolicy(relation->rd_id, &nDistKeyIndex, &distKeyIndex);
+
+
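+  /* Map each range to its replica group and collect every replica group's
+   * id and URL for the hasher setup below. */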
+  int32_t range_to_rg_map[nRanges];
+  List *rg = magma_build_range_to_rg_map(ps->ps_magma_splits, range_to_rg_map);
+  int nRg = list_length(rg);
+  int16_t *rgId = palloc0(sizeof(int16_t) * nRg);
+  char **rgUrl = palloc0(sizeof(char *) * nRg);
+  magma_build_rg_to_url_map(ps->ps_magma_splits, rg, rgId, rgUrl);
+
+  /* 2.3 Prepare database, schema, and table information */
+  MagmaFormatC_SetupTarget(user_data->fmt, database, schema, table);
+  MagmaFormatC_SetupTupDesc(user_data->fmt, user_data->numberOfColumns,
+                            user_data->colNames, user_data->colDatatypes,
+                            user_data->colDatatypeMods, user_data->colIsNulls);
+
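+  /* Configure the hasher with the distribution keys, the range/replica-group
+   * maps, and the jump hash table so rows can be routed to magma ranges. */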
+  int *jumpHashMap = get_jump_hash_map(nRanges);
+  MagmaFormatC_SetupHasher(user_data->fmt, nDistKeyIndex, distKeyIndex, nRanges,
+                           range_to_rg_map, nRg, rgId, rgUrl, jumpHashMap,
+                           JUMP_HASH_MAP_LENGTH);
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_update: failed to begin update: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+  /* 2.4 Save user data */
+  eud->ext_ps_user_data = (void *)user_data;
+
+  /* 3. Begin insert with the formatter */
+  MagmaFormatBeginUpdateMagmaFormatC(user_data->fmt, snapshot);
+  MagmaFormatCatchedError *e1 = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e1->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_update: failed to begin update: %s(%d)", e1->errMessage,
+         e1->errCode);
+  }
+  /* 4. Save the result */
+  ps->ps_ext_update_desc = eud;
+
+  PG_RETURN_POINTER(eud);
+}
+
+/* uint32
+ * magma_update(ExternalInsertDesc extUpdDesc,
+ *           TupleTableSlot *tupTableSlot)
+ */
+Datum magma_update(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ExternalInsertDesc eud = ps->ps_ext_update_desc;
+  TupleTableSlot *tts = ps->ps_tuple_table_slot;
+
+  /* The slot may hold a memtuple; extract all attributes into virtual tuple form */
+  slot_getallattrs(tts);
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(eud->ext_ps_user_data);
+
+  user_data->colTid.rangeid = DatumGetUInt16(eud->ext_rangeId);
+  user_data->colTid.rowid = DatumGetUInt64(eud->ext_rowId);
+  user_data->colValues = slot_get_values(tts);
+  user_data->colIsNulls = slot_get_isnull(tts);
+
+  static bool DUMMY_BOOL = true;
+  static int8_t DUMMY_INT8 = 0;
+  static int16_t DUMMY_INT16 = 0;
+  static int32_t DUMMY_INT32 = 0;
+  static int64_t DUMMY_INT64 = 0;
+  static float DUMMY_FLOAT = 0.0;
+  static double DUMMY_DOUBLE = 0.0;
+  static char DUMMY_TEXT[1] = "";
+  static TimestampType DUMMY_TIMESTAMP = {0, 0};
+
+  MemoryContext per_row_context = eud->ext_pstate->rowcontext;
+  MemoryContext old_context = MemoryContextSwitchTo(per_row_context);
+
+  /* Get column values */
+  user_data->colRawTid = (char *)(&(user_data->colTid));
+  for (int i = 0; i < user_data->numberOfColumns; ++i) {
+    int dataType = (int)(tts->tts_tupleDescriptor->attrs[i]->atttypid);
+
+    user_data->colRawValues[i] = NULL;
+
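+    /* NULL values: point the raw value at a dummy buffer of the matching
+     * width so the formatter always receives a valid address. */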
+    if (user_data->colIsNulls[i]) {
+      if (dataType == HAWQ_TYPE_CHAR) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT8);
+      } else if (dataType == HAWQ_TYPE_INT4) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT32);
+      } else if (dataType == HAWQ_TYPE_INT8) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT64);
+      } else if (dataType == HAWQ_TYPE_TEXT || dataType == HAWQ_TYPE_BYTE ||
+                 dataType == HAWQ_TYPE_BPCHAR ||
+                 dataType == HAWQ_TYPE_VARCHAR ||
+                 dataType == HAWQ_TYPE_NUMERIC) {
+        user_data->colRawValues[i] = (char *)(DUMMY_TEXT);
+      } else if (dataType == HAWQ_TYPE_FLOAT4) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_FLOAT);
+      } else if (dataType == HAWQ_TYPE_FLOAT8) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_DOUBLE);
+      } else if (dataType == HAWQ_TYPE_INT2) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT16);
+      } else if (dataType == HAWQ_TYPE_BOOL) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_BOOL);
+      } else if (dataType == HAWQ_TYPE_DATE) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT32);
+      } else if (dataType == HAWQ_TYPE_TIME) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT64);
+      } else if (dataType == HAWQ_TYPE_TIMESTAMP) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_TIMESTAMP);
+      } else if (STRUCTEXID == user_data->colDatatypes[i] ||
+                 IOBASETYPEID == user_data->colDatatypes[i]) {
+        user_data->colRawValues[i] = (char *)(DUMMY_TEXT);
+      } else if (dataType == HAWQ_TYPE_INVALID) {
+        elog(ERROR, "HAWQ data type %s is invalid", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      } else {
+        elog(ERROR, "HAWQ data type %s is not supported yet", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      }
+
+      continue;
+    }
+
+    if (dataType == HAWQ_TYPE_INT4 || dataType == HAWQ_TYPE_INT8 ||
+        dataType == HAWQ_TYPE_FLOAT4 || dataType == HAWQ_TYPE_FLOAT8 ||
+        dataType == HAWQ_TYPE_INT2 || dataType == HAWQ_TYPE_CHAR ||
+        dataType == HAWQ_TYPE_BOOL || dataType == HAWQ_TYPE_TIME) {
+      user_data->colRawValues[i] = (char *)(&(user_data->colValues[i]));
+    } else if (dataType == HAWQ_TYPE_DATE) {
+      int *date = (int *)(&(user_data->colValues[i]));
+      *date += POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE;
+      user_data->colRawValues[i] = (char *)(&(user_data->colValues[i]));
+    } else if (dataType == HAWQ_TYPE_TIMESTAMP) {
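+      /* Convert the PostgreSQL timestamp (microseconds since 2000-01-01)
+       * into seconds/nanoseconds since the Unix epoch for magma. */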
+      int64_t *timestamp = (int64_t *) (&(user_data->colValues[i]));
+      user_data->colTimestamp[i].second = *timestamp / 1000000
+          + (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * 60 * 60 * 24;
+      user_data->colTimestamp[i].nanosecond = *timestamp % 1000000 * 1000;
+      int64_t days = user_data->colTimestamp[i].second / 60 / 60 / 24;
+      if (user_data->colTimestamp[i].nanosecond < 0 &&
+          (days > POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE || days < 0))
+        user_data->colTimestamp[i].nanosecond += 1000000000;
+      if(user_data->colTimestamp[i].second < 0 && user_data->colTimestamp[i].nanosecond)
+        user_data->colTimestamp[i].second -= 1;
+      user_data->colRawValues[i] = (char *) (&(user_data->colTimestamp[i]));
+    } else if (dataType == HAWQ_TYPE_TEXT || dataType == HAWQ_TYPE_BYTE ||
+               dataType == HAWQ_TYPE_BPCHAR || dataType == HAWQ_TYPE_VARCHAR) {
+      user_data->colRawValues[i] = OutputFunctionCall(
+          &(eud->ext_pstate->out_functions[i]), user_data->colValues[i]);
+    } else if (dataType == HAWQ_TYPE_NUMERIC) {
+      user_data->colRawValues[i] = OutputFunctionCall(
+          &(eud->ext_pstate->out_functions[i]), user_data->colValues[i]);
+      Numeric num = DatumGetNumeric(user_data->colValues[i]);
+      if (NUMERIC_IS_NAN(num)) {
+        user_data->colIsNulls[i] = true;
+      }
+    } else if (STRUCTEXID == user_data->colDatatypes[i]) {
+      int32_t len = VARSIZE(user_data->colValues[i]);
+      if (len <= 0) {
+        elog(ERROR, "HAWQ base type(udt) %s should not be less than 0",
+             TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      }
+
+      char *pVal = DatumGetPointer(user_data->colValues[i]);
+      user_data->colRawValues[i] = palloc0(VARHDRSZ + len);
+
+      //  set value: the first 4 bytes are the length, then the raw value
+      // SET_VARSIZE(  (struct varlena * )user_data->colRawValues[i], len);
+      *((int32 *)(user_data->colRawValues[i])) = len;
+      memcpy(user_data->colRawValues[i] + VARHDRSZ, pVal, len);
+    } else if (IOBASETYPEID == user_data->colDatatypes[i]) {
+      //  get the length of basetype
+      bool passbyval = tts->tts_tupleDescriptor->attrs[i]->attbyval;
+      int32_t orilen = (int32_t)(tts->tts_tupleDescriptor->attrs[i]->attlen);
+      int32_t len =
+          get_typlen_fast(dataType, passbyval, orilen, user_data->colValues[i]);
+
+      if (1 > len) {  //  invalid length
+        elog(ERROR,
+             "HAWQ composite type(udt) %s got an invalid length:%d",
+             TypeNameToString(makeTypeNameFromOid(dataType, -1)), len);
+      }
+
+      if (passbyval) {
+        //  value store in Datum directly
+        char *val = (char *)(user_data->colValues[i]);
+        user_data->colRawValues[i] = palloc0(len);
+        memcpy(user_data->colRawValues[i], val, len);
+      } else {
+        //  value stored by pointer in Datum
+        char *val = DatumGetPointer(user_data->colValues[i]);
+        user_data->colRawValues[i] = palloc0(VARHDRSZ + len);
+
+        //  set value: the first 4 bytes are the length, then the raw value
+        // SET_VARSIZE(  (struct varlena * )user_data->colRawValues[i], len);
+        *((int32 *)(user_data->colRawValues[i])) = len;
+        memcpy(user_data->colRawValues[i] + VARHDRSZ, val, len);
+      }
+    } else if (dataType == HAWQ_TYPE_INVALID) {
+      elog(ERROR, "HAWQ data type %s is invalid", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+    } else {
+      elog(ERROR, "HAWQ data type %s is not supported yet", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+    }
+  }
+
+  /* Pass to formatter to output */
+  int updateCount = MagmaFormatUpdateMagmaFormatC(user_data->fmt, user_data->colRawTid,
+                                user_data->colRawValues, user_data->colIsNulls);
+
+  ps->ps_update_count = updateCount;
+
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_update: failed to update: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+
+  MemoryContextReset(per_row_context);
+  MemoryContextSwitchTo(old_context);
+
+  ps->ps_tuple_oid = InvalidOid;
+
+  // PG_RETURN_VOID();
+  PG_RETURN_UINT32(updateCount);
+}
+
+/* uint32
+ * magma_endupdate(ExternalInsertDesc extUpdDesc)
+ */
+Datum magma_endupdate(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ExternalInsertDesc eud = ps->ps_ext_update_desc;
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(eud->ext_ps_user_data);
+
+  int updateCount = MagmaFormatEndUpdateMagmaFormatC(user_data->fmt);
+  ps->ps_update_count = updateCount;
+
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_update: failed to end update: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+  MagmaFormatFreeMagmaFormatC(&(user_data->fmt));
+
+  for (int i = 0; i < user_data->numberOfColumns; ++i) {
+    pfree(user_data->colNames[i]);
+  }
+  pfree(user_data->colNames);
+  pfree(user_data->colDatatypes);
+  pfree(user_data->colRawValues);
+
+  if (eud->ext_formatter_data) {
+    pfree(eud->ext_formatter_data);
+  }
+
+  if (eud->ext_pstate != NULL && eud->ext_pstate->rowcontext != NULL) {
+    /*
+     * delete the row context
+     */
+    MemoryContextDelete(eud->ext_pstate->rowcontext);
+    eud->ext_pstate->rowcontext = NULL;
+  }
+
+  pfree(eud);
+
+  // PG_RETURN_VOID();
+  PG_RETURN_UINT32(updateCount);
+}
+
+/*
+ * ExternalInsertDesc
+ * magma_insert_init(Relation relation,
+ *                int formatterType,
+ *                char *formatterName)
+ */
+Datum magma_insert_init(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  Relation relation = ps->ps_relation;
+  int formatterType = ps->ps_formatter_type;
+  char *formatterName = ps->ps_formatter_name;
+  char *serializeSchema = ps->ps_magma_serializeSchema;
+  int serializeSchemaLen = ps->ps_magma_serializeSchemaLen;
+  MagmaSnapshot *snapshot = &(ps->ps_snapshot);
+
+  /* 1. Allocate and initialize the insert descriptor */
+  ExternalInsertDesc eid = palloc0(sizeof(ExternalInsertDescData));
+  eid->ext_formatter_type = formatterType;
+  eid->ext_formatter_name = formatterName;
+
+  /* 1.1 Setup insert functions */
+  get_magma_insert_functions(formatterName, eid);
+
+  /* 1.2 Initialize basic information */
+  eid->ext_rel = relation;
+  eid->ext_noop = (Gp_role == GP_ROLE_DISPATCH);
+  eid->ext_formatter_data = NULL;
+
+  /* 1.3 Get URI string */
+  ExtTableEntry *ete = GetExtTableEntry(RelationGetRelid(relation));
+
+  Value *v = linitial(ete->locations);
+  char *uri_str = pstrdup(v->val.str);
+  eid->ext_uri = uri_str;
+
+  /* 1.4 Allocate and initialize structure which track data parsing state */
+  eid->ext_pstate = (CopyStateData *)palloc0(sizeof(CopyStateData));
+  eid->ext_tupDesc = RelationGetDescr(relation);
+  eid->ext_values = (Datum *)palloc0(eid->ext_tupDesc->natts * sizeof(Datum));
+  eid->ext_nulls = (bool *)palloc0(eid->ext_tupDesc->natts * sizeof(bool));
+
+  /* 1.5 Get format options */
+  List *fmt_opts = NIL;
+  fmt_opts = lappend(fmt_opts, makeString(pstrdup(ete->fmtopts)));
+
+  /* 1.6 Initialize parse state */
+  /* 1.6.1 Initialize basic information for pstate */
+  CopyState pstate = eid->ext_pstate;
+  pstate->fe_eof = false;
+  pstate->eol_type = EOL_UNKNOWN;
+  pstate->eol_str = NULL;
+  pstate->cur_relname = RelationGetRelationName(relation);
+  pstate->cur_lineno = 0;
+  pstate->err_loc_type = ROWNUM_ORIGINAL;
+  pstate->cur_attname = NULL;
+  pstate->raw_buf_done = true; /* true so we will read data in first run */
+  pstate->line_done = true;
+  pstate->bytesread = 0;
+  pstate->custom = false;
+  pstate->header_line = false;
+  pstate->fill_missing = false;
+  pstate->line_buf_converted = false;
+  pstate->raw_buf_index = 0;
+  pstate->processed = 0;
+  pstate->filename = uri_str;
+  pstate->copy_dest = COPY_EXTERNAL_SOURCE;
+  pstate->missing_bytes = 0;
+  pstate->rel = relation;
+
+  /* 1.6.2 Setup encoding information */
+  /*
+   * Set up encoding conversion info.  Even if the client and server
+   * encodings are the same, we must apply pg_client_to_server() to validate
+   * data in multibyte encodings.
+   *
+   * Each external table specifies the encoding of its external data. We will
+   * therefore set a client encoding and client-to-server conversion procedure
+   * in here (server-to-client in WET) and these will be used in the data
+   * conversion routines (in copy.c CopyReadLineXXX(), etc).
+   */
+  int fmt_encoding = ete->encoding;
+  Insist(PG_VALID_ENCODING(fmt_encoding));
+  pstate->client_encoding = fmt_encoding;
+  Oid conversion_proc =
+      FindDefaultConversionProc(GetDatabaseEncoding(), fmt_encoding);
+
+  if (OidIsValid(conversion_proc)) {
+    /* conversion proc found */
+    pstate->enc_conversion_proc = palloc0(sizeof(FmgrInfo));
+    fmgr_info(conversion_proc, pstate->enc_conversion_proc);
+  } else {
+    /* no conversion function (both encodings are probably the same) */
+    pstate->enc_conversion_proc = NULL;
+  }
+
+  pstate->need_transcoding = pstate->client_encoding != GetDatabaseEncoding();
+  pstate->encoding_embeds_ascii =
+      PG_ENCODING_IS_CLIENT_ONLY(pstate->client_encoding);
+
+  /* 1.6.3 Parse format options */
+  char *format_str = pstrdup((char *)strVal(linitial(fmt_opts)));
+  char *fmt_name = NULL;
+  List *l = magma_parse_format_string(format_str, &fmt_name);
+  pstate->custom_formatter_name = fmt_name;
+  pstate->custom_formatter_params = l;
+  pfree(format_str);
+
+  /* 1.6.4 Setup tuple description */
+  TupleDesc tup_desc = eid->ext_tupDesc;
+  pstate->attr_offsets = (int *)palloc0(tup_desc->natts * sizeof(int));
+
+  /* 1.6.5 Generate or convert list of attributes to process */
+  pstate->attnumlist = CopyGetAttnums(tup_desc, relation, NIL);
+
+  /* 1.6.6 Convert FORCE NOT NULL name list to per-column flags, check validity
+   */
+  pstate->force_notnull_flags = (bool *)palloc0(tup_desc->natts * sizeof(bool));
+  if (pstate->force_notnull) {
+    List *attnums;
+    ListCell *cur;
+
+    attnums = CopyGetAttnums(tup_desc, relation, pstate->force_notnull);
+
+    foreach (cur, attnums) {
+      int attnum = lfirst_int(cur);
+      pstate->force_notnull_flags[attnum - 1] = true;
+    }
+  }
+
+  /* 1.6.7 Take care of state that is WET specific */
+  Form_pg_attribute *attr = tup_desc->attrs;
+  ListCell *cur;
+
+  pstate->null_print_client = pstate->null_print; /* default */
+  pstate->fe_msgbuf = makeStringInfo(); /* use fe_msgbuf as a per-row buffer */
+  pstate->out_functions =
+      (FmgrInfo *)palloc0(tup_desc->natts * sizeof(FmgrInfo));
+
+  foreach (cur,
+           pstate->attnumlist) /* Get info about the columns we need to process */
+  {
+    int attnum = lfirst_int(cur);
+    Oid out_func_oid;
+    bool isvarlena;
+
+    getTypeOutputInfo(attr[attnum - 1]->atttypid, &out_func_oid, &isvarlena);
+    fmgr_info(out_func_oid, &pstate->out_functions[attnum - 1]);
+  }
+
+  /*
+   * We need to convert null_print to client encoding, because it
+   * will be sent directly with CopySendString.
+   */
+  if (pstate->need_transcoding) {
+    pstate->null_print_client = pg_server_to_custom(
+        pstate->null_print, pstate->null_print_len, pstate->client_encoding,
+        pstate->enc_conversion_proc);
+  }
+
+  /* 1.6.8 Create temporary memory context for per row process */
+  /*
+   * Create a temporary memory context that we can reset once per row to
+   * recover palloc'd memory.  This avoids any problems with leaks inside
+   * datatype input or output routines, and should be faster than retail
+   * pfree's anyway.
+   */
+  pstate->rowcontext = AllocSetContextCreate(
+      CurrentMemoryContext, "ExtTableMemCxt", ALLOCSET_DEFAULT_MINSIZE,
+      ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE);
+
+  /* 1.7 Initialize formatter data */
+  eid->ext_formatter_data = (FormatterData *)palloc0(sizeof(FormatterData));
+  eid->ext_formatter_data->fmt_perrow_ctx = eid->ext_pstate->rowcontext;
+
+  /* 2. Setup user data */
+
+  /* 2.1 Get database, schema, and table name for the insert */
+  Assert(database != NULL);
+  Oid namespaceOid = RelationGetNamespace(relation);
+  char *schema = getNamespaceNameByOid(namespaceOid);
+  char *table = RelationGetRelationName(relation);
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)palloc0(sizeof(GlobalFormatUserData));
+
+  if (formatterName != NULL &&
+      (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
+    user_data->isMagmatp = true;
+  } else {
+    user_data->isMagmatp = false;
+  }
+
+  /* the number of ranges is dynamic for a magma table */
+  int32_t nRanges = 0;
+  ListCell *lc_split = NULL;
+  foreach (lc_split, ps->ps_magma_splits) {
+      List *split = (List *)lfirst(lc_split);
+      nRanges += list_length(split);
+  }
+
+  init_magma_format_user_data_for_write(tup_desc, user_data, relation);
+
+  /* 2.2 Create formatter instance */
+  bool isexternal = false;
+  get_magma_category_info(ete->fmtopts, &isexternal);
+
+  List *fmt_opts_defelem = pstate->custom_formatter_params;
+  user_data->fmt = create_magma_formatter_instance(
+      fmt_opts_defelem, serializeSchema, serializeSchemaLen, fmt_encoding,
+      formatterName, nRanges);
+
+  /* prepare hash info */
+  int32_t nDistKeyIndex = 0;
+  int16_t *distKeyIndex = NULL;
+  fetchDistributionPolicy(relation->rd_id, &nDistKeyIndex, &distKeyIndex);
+
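+  /* Map each range to its replica group and collect every replica group's
+   * id and URL for the hasher setup below. */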
+  uint32 range_to_rg_map[nRanges];
+  List *rg = magma_build_range_to_rg_map(ps->ps_magma_splits, range_to_rg_map);
+  int nRg = list_length(rg);
+  uint16 *rgId = palloc0(sizeof(uint16) * nRg);
+  char **rgUrl = palloc0(sizeof(char *) * nRg);
+  magma_build_rg_to_url_map(ps->ps_magma_splits, rg, rgId, rgUrl);
+
+  /* 2.3 Prepare database, schema, and table information */
+  MagmaFormatC_SetupTarget(user_data->fmt, database, schema, table);
+  MagmaFormatC_SetupTupDesc(user_data->fmt, user_data->numberOfColumns,
+                            user_data->colNames, user_data->colDatatypes,
+                            user_data->colDatatypeMods, user_data->colIsNulls);
+
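+  /* Configure the hasher with the distribution keys, the range/replica-group
+   * maps, and the jump hash table so rows can be routed to magma ranges. */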
+  int *jumpHashMap = get_jump_hash_map(nRanges);
+  MagmaFormatC_SetupHasher(user_data->fmt, nDistKeyIndex, distKeyIndex, nRanges,
+                           range_to_rg_map, nRg, rgId, rgUrl, jumpHashMap,
+                           JUMP_HASH_MAP_LENGTH);
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_insert: failed to begin insert: %s (%d)", e->errMessage,
+         e->errCode);
+  }
+
+  eid->ext_ps_user_data = (void *)user_data;
+
+  /* 3. Begin insert with the formatter */
+  MagmaFormatBeginInsertMagmaFormatC(user_data->fmt, snapshot);
+
+  MagmaFormatCatchedError *e1 = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e1->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_insert: failed to begin insert: %s (%d)", e1->errMessage,
+         e1->errCode);
+  }
+
+  /* 4. Save the result */
+  ps->ps_ext_insert_desc = eid;
+
+  PG_RETURN_POINTER(eid);
+}
+
+/*
+ * Oid
+ * magma_insert(ExternalInsertDesc extInsertDesc,
+ *           TupleTableSlot *tupTableSlot)
+ */
+Datum magma_insert(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ExternalInsertDesc eid = ps->ps_ext_insert_desc;
+  TupleTableSlot *tts = ps->ps_tuple_table_slot;
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(eid->ext_ps_user_data);
+
+  user_data->colValues = slot_get_values(tts);
+  user_data->colIsNulls = slot_get_isnull(tts);
+
+  static bool DUMMY_BOOL = true;
+  static int8_t DUMMY_INT8 = 0;
+  static int16_t DUMMY_INT16 = 0;
+  static int32_t DUMMY_INT32 = 0;
+  static int64_t DUMMY_INT64 = 0;
+  static float DUMMY_FLOAT = 0.0;
+  static double DUMMY_DOUBLE = 0.0;
+  static char DUMMY_TEXT[1] = "";
+  static int32_t DUMMY_DATE = 0;
+  static int64_t DUMMY_TIME = 0;
+  static TimestampType DUMMY_TIMESTAMP = {0, 0};
+
+  TupleDesc tupdesc = tts->tts_tupleDescriptor;
+  user_data->numberOfColumns = tupdesc->natts;
+
+  MemoryContext per_row_context = eid->ext_pstate->rowcontext;
+  MemoryContext old_context = MemoryContextSwitchTo(per_row_context);
+
+  /* Get column values */
+  for (int i = 0; i < user_data->numberOfColumns; ++i) {
+    int dataType = (int)(tupdesc->attrs[i]->atttypid);
+
+    user_data->colRawValues[i] = NULL;
+
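+    /* NULL values: point the raw value at a dummy buffer of the matching
+     * width so the formatter always receives a valid address. */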
+    if (user_data->colIsNulls[i]) {
+      if (dataType == HAWQ_TYPE_CHAR) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT8);
+      } else if (dataType == HAWQ_TYPE_INT2) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT16);
+      } else if (dataType == HAWQ_TYPE_INT4) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT32);
+      } else if (dataType == HAWQ_TYPE_INT8) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_INT64);
+      } else if (dataType == HAWQ_TYPE_TEXT || dataType == HAWQ_TYPE_BYTE ||
+                 dataType == HAWQ_TYPE_BPCHAR ||
+                 dataType == HAWQ_TYPE_VARCHAR ||
+                 dataType == HAWQ_TYPE_NUMERIC ||
+                 dataType == HAWQ_TYPE_JSON ||
+                 dataType == HAWQ_TYPE_JSONB) {
+        user_data->colRawValues[i] = (char *)(DUMMY_TEXT);
+      } else if (dataType == HAWQ_TYPE_FLOAT4) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_FLOAT);
+      } else if (dataType == HAWQ_TYPE_FLOAT8) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_DOUBLE);
+      } else if (dataType == HAWQ_TYPE_BOOL) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_BOOL);
+      } else if (dataType == HAWQ_TYPE_DATE) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_DATE);
+      } else if (dataType == HAWQ_TYPE_TIME) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_TIME);
+      } else if (dataType == HAWQ_TYPE_TIMESTAMP) {
+        user_data->colRawValues[i] = (char *)(&DUMMY_TIMESTAMP);
+      }
+      // do not move the rowtype/basetype branch to any other location
+      else if (STRUCTEXID == user_data->colDatatypes[i] ||
+               IOBASETYPEID == user_data->colDatatypes[i]) {
+        user_data->colRawValues[i] = (char *)(DUMMY_TEXT);
+      } else if (dataType == HAWQ_TYPE_INVALID) {
+        elog(ERROR, "HAWQ data type %s is invalid", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      } else {
+        elog(ERROR, "HAWQ data type %s is not supported yet", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      }
+
+      continue;
+    }
+
+    if (dataType == HAWQ_TYPE_CHAR || dataType == HAWQ_TYPE_INT2 ||
+        dataType == HAWQ_TYPE_INT4 || dataType == HAWQ_TYPE_INT8 ||
+        dataType == HAWQ_TYPE_FLOAT4 || dataType == HAWQ_TYPE_FLOAT8 ||
+        dataType == HAWQ_TYPE_BOOL || dataType == HAWQ_TYPE_TIME) {
+      user_data->colRawValues[i] = (char *)(&(user_data->colValues[i]));
+    } else if (dataType == HAWQ_TYPE_DATE) {
+      int *date = (int *)(&(user_data->colValues[i]));
+      *date += POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE;
+      user_data->colRawValues[i] = (char *)(&(user_data->colValues[i]));
+    } else if (dataType == HAWQ_TYPE_TIMESTAMP) {
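+      /* Convert the PostgreSQL timestamp (microseconds since 2000-01-01)
+       * into seconds/nanoseconds since the Unix epoch for magma. */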
+      int64_t *timestamp = (int64_t *) (&(user_data->colValues[i]));
+      user_data->colTimestamp[i].second = *timestamp / 1000000
+          + (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * 60 * 60 * 24;
+      user_data->colTimestamp[i].nanosecond = *timestamp % 1000000 * 1000;
+      int64_t days = user_data->colTimestamp[i].second / 60 / 60 / 24;
+      if (user_data->colTimestamp[i].nanosecond < 0 &&
+          (days > POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE || days < 0))
+        user_data->colTimestamp[i].nanosecond += 1000000000;
+      if(user_data->colTimestamp[i].second < 0 && user_data->colTimestamp[i].nanosecond)
+        user_data->colTimestamp[i].second -= 1;
+      user_data->colRawValues[i] = (char *) (&(user_data->colTimestamp[i]));
+    } else if (dataType == HAWQ_TYPE_NUMERIC) {
+      Numeric num = DatumGetNumeric(user_data->colValues[i]);
+      user_data->colRawValues[i] = (char *)num;
+      if (NUMERIC_IS_NAN(num)) // XXX(chiyang): problematic legacy NaN
+      {
+        user_data->colIsNulls[i] = true;
+      }
+    } else if (dataType == HAWQ_TYPE_TEXT || dataType == HAWQ_TYPE_BYTE ||
+               dataType == HAWQ_TYPE_BPCHAR || dataType == HAWQ_TYPE_VARCHAR
+  || dataType == HAWQ_TYPE_JSON || dataType == HAWQ_TYPE_JSONB) {
+      struct varlena *varlen =
+          (struct varlena *)DatumGetPointer(user_data->colValues[i]);
+      user_data->colValLength[i] = VARSIZE_ANY_EXHDR(varlen);
+      user_data->colRawValues[i] = VARDATA_ANY(varlen);
+    } else if (STRUCTEXID == user_data->colDatatypes[i]) {
+      int32_t len = VARSIZE(user_data->colValues[i]);
+      if (len <= 0) {
+        elog(ERROR, "HAWQ base type(udt) %s should not be less than 0",
+             TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+      }
+
+      char *pVal = DatumGetPointer(user_data->colValues[i]);
+      user_data->colRawValues[i] = palloc0(VARHDRSZ + len);
+
+      //  set value: the first 4 bytes are the length, then the raw value
+      // SET_VARSIZE(  (struct varlena * )user_data->colRawValues[i], len);
+      *((int32 *)(user_data->colRawValues[i])) = len;
+      memcpy(user_data->colRawValues[i] + VARHDRSZ, pVal, len);
+
+    } else if (IOBASETYPEID == user_data->colDatatypes[i]) {
+      //  get the length of basetype
+      bool passbyval = tupdesc->attrs[i]->attbyval;
+      int32_t orilen = (int32_t)(tupdesc->attrs[i]->attlen);
+      int32_t len =
+          get_typlen_fast(dataType, passbyval, orilen, user_data->colValues[i]);
+
+      if (1 > len) {  //  invalid length
+        elog(ERROR,
+             "HAWQ composite type(udt) %s got an invalid length:%d",
+             TypeNameToString(makeTypeNameFromOid(dataType, -1)), len);
+      }
+
+      if (passbyval) {
+        //  value store in Datum directly
+        char *val = &(user_data->colValues[i]);
+        user_data->colRawValues[i] = palloc0(VARHDRSZ + len);
+        *((int32 *)(user_data->colRawValues[i])) = len;
+        memcpy(user_data->colRawValues[i] + VARHDRSZ, val, len);
+      } else {
+        //  value stored by pointer in Datum
+        char *val = DatumGetPointer(user_data->colValues[i]);
+        user_data->colRawValues[i] = palloc0(VARHDRSZ + len);
+
+        //  set value: the first 4 bytes are the length, then the raw value
+        // SET_VARSIZE(  (struct varlena * )user_data->colRawValues[i], len);
+        *((int32 *)(user_data->colRawValues[i])) = len;
+        memcpy(user_data->colRawValues[i] + VARHDRSZ, val, len);
+      }
+    } else if (dataType == HAWQ_TYPE_INVALID) {
+      elog(ERROR, "HAWQ data type %s is invalid", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+    } else {
+      elog(ERROR, "HAWQ data type %s is not supported yet", TypeNameToString(makeTypeNameFromOid(dataType, -1)));
+    }
+  }
+
+  /* Pass to formatter to output */
+  MagmaFormatInsertMagmaFormatC(user_data->fmt, user_data->colRawValues,
+                                user_data->colValLength, user_data->colIsNulls);
+
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_insert: failed to insert: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+
+  ps->ps_tuple_oid = InvalidOid;
+
+  MemoryContextReset(per_row_context);
+  MemoryContextSwitchTo(old_context);
+
+  PG_RETURN_OID(InvalidOid);
+}
+
+/*
+ * void
+ * magma_insert_finish(ExternalInsertDesc extInsertDesc)
+ */
+Datum magma_insert_finish(PG_FUNCTION_ARGS) {
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ExternalInsertDesc eid = ps->ps_ext_insert_desc;
+
+  GlobalFormatUserData *user_data =
+      (GlobalFormatUserData *)(eid->ext_ps_user_data);
+
+  MagmaFormatEndInsertMagmaFormatC(user_data->fmt);
+
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_insert: failed to end insert: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+
+  MagmaFormatFreeMagmaFormatC(&(user_data->fmt));
+
+  for (int i = 0; i < user_data->numberOfColumns; ++i) {
+    pfree(user_data->colNames[i]);
+  }
+  pfree(user_data->colNames);
+  pfree(user_data->colDatatypes);
+  pfree(user_data->colRawValues);
+  pfree(user_data->colDatatypeMods);
+  pfree(user_data->colTimestamp);
+  pfree(user_data);
+
+  if (eid->ext_formatter_data) pfree(eid->ext_formatter_data);
+
+  if (eid->ext_formatter_name) pfree(eid->ext_formatter_name);
+
+  if (eid->ext_pstate != NULL && eid->ext_pstate->rowcontext != NULL) {
+    /*
+     * delete the row context
+     */
+    MemoryContextDelete(eid->ext_pstate->rowcontext);
+    eid->ext_pstate->rowcontext = NULL;
+  }
+
+  pfree(eid);
+
+  PG_RETURN_VOID();
+}
+
+/*
+ * void
+ * magma_transaction(PlugStorageTransaction transaction)
+ */
+Datum magma_transaction(PG_FUNCTION_ARGS) {
+  elog(DEBUG3, "magma_transaction begin");
+  PlugStorage ps = (PlugStorage)(fcinfo->context);
+  PlugStorageTransaction pst = ps->ps_transaction;
+
+  PlugStorageTransactionCommand txn_command = pst->pst_transaction_command;
+
+  MagmaClientC *client = create_magma_client_instance();
+  if (client == NULL) {
+    elog(ERROR, "failed to connect to magma service");
+  }
+
+  switch (txn_command) {
+    case PS_TXN_CMD_BEGIN: {
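+      /* Copy the table full names into a plain C array for the magma client,
+       * begin the distributed transaction, then free the copies. */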
+      int magmaTableFullNamesSize = list_length(ps->magma_talbe_full_names);
+      MagmaTableFullName *magmaTableFullNames = (MagmaTableFullName *) palloc0(magmaTableFullNamesSize * sizeof(MagmaTableFullName));
+      int i = 0;
+      ListCell *lc;
+      foreach (lc, ps->magma_talbe_full_names) {
+        MagmaTableFullName* mtfn = lfirst(lc);
+        magmaTableFullNames[i].databaseName = pstrdup(mtfn->databaseName);
+        magmaTableFullNames[i].schemaName = pstrdup(mtfn->schemaName);
+        magmaTableFullNames[i].tableName = pstrdup(mtfn->tableName);
+        ++i;
+      }
+      pst->pst_transaction_dist =
+          MagmaClientC_BeginTransaction(client, magmaTableFullNames, magmaTableFullNamesSize);
+      for (int i = 0; i < magmaTableFullNamesSize; ++i) {
+        pfree(magmaTableFullNames[i].databaseName);
+        pfree(magmaTableFullNames[i].schemaName);
+        pfree(magmaTableFullNames[i].tableName);
+      }
+      pfree(magmaTableFullNames);
+      if (pst->pst_transaction_dist == NULL) {
+        pst->pst_transaction_status = PS_TXN_STS_DEFAULT;
+        pst->pst_transaction_id = InvalidTransactionId;
+        pst->pst_transaction_dist = NULL;
+        elog(DEBUG1, "magma_transaction: begin snapshot: NULL");
+      } else {
+        elog(DEBUG1, "magma_transaction: begin snapshot: (%llu, %u, %llu, %u)",
+             pst->pst_transaction_dist->currentTransaction.txnId,
+             pst->pst_transaction_dist->currentTransaction.txnStatus,
+             pst->pst_transaction_dist->txnActions.txnActionStartOffset,
+             pst->pst_transaction_dist->txnActions.txnActionSize);
+      }
+      magma_check_result(&client);
+      break;
+    }
+    case PS_TXN_CMD_COMMIT:
+      if (pst->pst_transaction_dist == NULL) {
+        elog(DEBUG1, "magma_transaction: commit snapshot: NULL");
+      } else {
+        elog(DEBUG1,
+             "magma_transaction: commit snapshot: (%llu, %u, %llu, %u)",
+             pst->pst_transaction_dist->currentTransaction.txnId,
+             pst->pst_transaction_dist->currentTransaction.txnStatus,
+             pst->pst_transaction_dist->txnActions.txnActionStartOffset,
+             pst->pst_transaction_dist->txnActions.txnActionSize);
+      }
+
+      MagmaClientC_CommitTransaction(client, pst->pst_transaction_dist);
+      magma_check_result(&client);
+      break;
+    case PS_TXN_CMD_ABORT:
+      if (pst->pst_transaction_dist == NULL) {
+        elog(DEBUG1, "magma_transaction: abort snapshot: NULL");
+      } else {
+        elog(DEBUG1,
+             "magma_transaction: abort snapshot: (%llu, %u, %llu, %u)",
+             pst->pst_transaction_dist->currentTransaction.txnId,
+             pst->pst_transaction_dist->currentTransaction.txnStatus,
+             pst->pst_transaction_dist->txnActions.txnActionStartOffset,
+             pst->pst_transaction_dist->txnActions.txnActionSize);
+      }
+
+      if (pst->pst_transaction_status != PS_TXN_STS_DEFAULT &&
+          pst->pst_transaction_id != InvalidTransactionId &&
+          pst->pst_transaction_dist != NULL) {
+        MagmaClientC_AbortTransaction(client, pst->pst_transaction_dist,
+                                      PlugStorageGetIsCleanupAbort());
+        pst->pst_transaction_dist = NULL;
+        pst->pst_transaction_id = InvalidTransactionId;
+        pst->pst_transaction_status = PS_TXN_STS_DEFAULT;
+        magma_check_result(&client);
+      }
+      break;
+    default:
+      elog(ERROR, "Transaction command for magma is invalid %d", txn_command);
+      break;
+  }
+
+
+  PG_RETURN_VOID();
+}
+
+static void get_magma_category_info(char *fmtoptstr, bool *isexternal) {
+  // parse the category option; the result is not used yet.
+  char *fmt_name = NULL;
+  List *l = magma_parse_format_string(fmtoptstr, &fmt_name);
+
+  ListCell *opt;
+  foreach (opt, l) {
+    DefElem *defel = (DefElem *)lfirst(opt);
+    char *key = defel->defname;
+    bool need_free_value = false;
+    char *val = (char *)defGetString(defel, &need_free_value);
+
+    /* check category */
+    if (strncasecmp(key, "category", strlen("category")) == 0) {
+      if (strncasecmp(val, "internal", strlen("internal")) == 0) {
+        *isexternal = false;
+      }
+      if (strncasecmp(val, "external", strlen("external")) == 0) {
+        *isexternal = true;
+      }
+    }
+  }
+}
+
+static FmgrInfo *get_magma_function(char *formatter_name, char *function_name) {
+  Assert(formatter_name);
+  Assert(function_name);
+
+  Oid procOid = InvalidOid;
+  FmgrInfo *procInfo = NULL;
+
+  procOid = LookupPlugStorageValidatorFunc(formatter_name, function_name);
+
+  if (OidIsValid(procOid)) {
+    procInfo = (FmgrInfo *)palloc0(sizeof(FmgrInfo));
+    fmgr_info(procOid, procInfo);
+  } else {
+    elog(ERROR, "%s_%s function was not found for pluggable storage",
+         formatter_name, function_name);
+  }
+
+  return procInfo;
+}
+
+static void get_magma_scan_functions(char *formatter_name,
+                                     FileScanDesc file_scan_desc) {
+  file_scan_desc->fs_ps_scan_funcs.beginscan =
+      get_magma_function(formatter_name, "beginscan");
+
+  file_scan_desc->fs_ps_scan_funcs.getnext_init =
+      get_magma_function(formatter_name, "getnext_init");
+
+  file_scan_desc->fs_ps_scan_funcs.getnext =
+      get_magma_function(formatter_name, "getnext");
+
+  file_scan_desc->fs_ps_scan_funcs.rescan =
+      get_magma_function(formatter_name, "rescan");
+
+  file_scan_desc->fs_ps_scan_funcs.endscan =
+      get_magma_function(formatter_name, "endscan");
+
+  file_scan_desc->fs_ps_scan_funcs.stopscan =
+      get_magma_function(formatter_name, "stopscan");
+}
+
+static void get_magma_insert_functions(char *formatter_name,
+                                       ExternalInsertDesc ext_insert_desc) {
+  ext_insert_desc->ext_ps_insert_funcs.insert_init =
+      get_magma_function(formatter_name, "insert_init");
+
+  ext_insert_desc->ext_ps_insert_funcs.insert =
+      get_magma_function(formatter_name, "insert");
+
+  ext_insert_desc->ext_ps_insert_funcs.insert_finish =
+      get_magma_function(formatter_name, "insert_finish");
+}
+
+static void get_magma_delete_functions(char *formatter_name,
+                                       ExternalInsertDesc ext_delete_desc) {
+  ext_delete_desc->ext_ps_delete_funcs.begindeletes =
+      get_magma_function(formatter_name, "begindelete");
+
+  ext_delete_desc->ext_ps_delete_funcs.deletes =
+      get_magma_function(formatter_name, "delete");
+
+  ext_delete_desc->ext_ps_delete_funcs.enddeletes =
+      get_magma_function(formatter_name, "enddelete");
+}
+
+static void get_magma_update_functions(char *formatter_name,
+                                       ExternalInsertDesc ext_update_desc) {
+  ext_update_desc->ext_ps_update_funcs.beginupdates =
+      get_magma_function(formatter_name, "beginupdate");
+
+  ext_update_desc->ext_ps_update_funcs.updates =
+      get_magma_function(formatter_name, "update");
+
+  ext_update_desc->ext_ps_update_funcs.endupdates =
+      get_magma_function(formatter_name, "endupdate");
+}
+
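+/*
+ * Serialize the formatter options plus encoding, range number, serialized
+ * schema, and magma format type into the JSON string consumed by the magma
+ * formatter library.
+ */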
+static void build_options_in_json(char *serializeSchema, int serializeSchemaLen,
+                                  List *fmt_opts_defelem, int encoding, int rangeNum,
+                                  char *formatterName, char **json_str) {
+  struct json_object *opt_json_object = json_object_new_object();
+
+  /* add format options for the formatter */
+  char *key_str = NULL;
+  char *val_str = NULL;
+  // const char *whitespace = " \t\n\r";
+
+  int nargs = list_length(fmt_opts_defelem);
+  for (int i = 0; i < nargs; ++i) {
+    key_str = ((DefElem *)(list_nth(fmt_opts_defelem, i)))->defname;
+    val_str =
+        ((Value *)((DefElem *)(list_nth(fmt_opts_defelem, i)))->arg)->val.str;
+
+    json_object_object_add(opt_json_object, key_str,
+                           json_object_new_string(val_str));
+  }
+
+  /* add encoding option for the formatter */
+  if (json_object_object_get(opt_json_object, "encoding") == NULL) {
+    const char *encodingStr = pg_encoding_to_char(encoding);
+    char *encodingStrLower = str_tolower(encodingStr, strlen(encodingStr));
+
+    json_object_object_add(opt_json_object, "encoding",
+                           json_object_new_string(encodingStrLower));
+    if (encodingStrLower) pfree(encodingStrLower);
+  }
+
+  /* add magma_range_num option for magma */
+  if (json_object_object_get(opt_json_object, "magma_range_num") == NULL) {
+
+    json_object_object_add(opt_json_object, "magma_range_num",
+                           json_object_new_int64(rangeNum));
+  }
+
+  /* add serialized_schema option for magma */
+  if (json_object_object_get(opt_json_object, "serialized_schema") == NULL) {
+    json_object_object_add(
+        opt_json_object, "serialized_schema",
+        json_object_new_string_len(serializeSchema, serializeSchemaLen));
+  }
+
+  /* add magma_format_type option for magma */
+  if (json_object_object_get(opt_json_object, "magma_format_type") == NULL) {
+    char *magma_type = NULL;
+    if (formatterName != NULL &&
+        (strncasecmp(formatterName, "magmatp", strlen("magmatp")) == 0)) {
+      magma_type = "0";
+    } else if (formatterName != NULL &&
+               strncasecmp(formatterName, "magmaap", strlen("magmaap")) == 0) {
+      magma_type = "1";
+    }
+    if (magma_type != NULL) {
+      json_object_object_add(opt_json_object, "magma_format_type",
+                             json_object_new_string(magma_type));
+    }
+  }
+
+  *json_str = NULL;
+  if (opt_json_object != NULL) {
+    const char *str = json_object_to_json_string(opt_json_object);
+    *json_str = (char *)palloc0(strlen(str) + 1);
+    strcpy(*json_str, str);
+    json_object_put(opt_json_object);
+
+    elog(DEBUG3, "formatter options are %s", *json_str);
+  }
+}
+
+static MagmaFormatC *create_magma_formatter_instance(List *fmt_opts_defelem,
+                                                     char *serializeSchema,
+                                                     int serializeSchemaLen,
+                                                     int fmt_encoding,
+                                                     char *formatterName,
+                                                     int rangeNum) {
+  char *fmt_opts_str = NULL;
+
+  build_options_in_json(serializeSchema, serializeSchemaLen, fmt_opts_defelem,
+                        fmt_encoding, rangeNum, formatterName, &fmt_opts_str);
+
+  MagmaFormatC *magma_format_c = MagmaFormatNewMagmaFormatC(fmt_opts_str);
+  if (fmt_opts_str != NULL) {
+    pfree(fmt_opts_str);
+  }
+  return magma_format_c;
+}
+
+static MagmaClientC *create_magma_client_instance() {
+  if (global_magma_client != NULL) {
+    MagmaClientC_ResetMagmaClient4Reuse(&global_magma_client);
+    return global_magma_client;
+  }
+
+  global_magma_client = MagmaClientC_NewMagmaClient(magma_nodes_url);
+  MagmaResult *result = MagmaClientC_GetResult(global_magma_client);
+  if (result->level == MAGMA_ERROR) {
+    MagmaClientC_FreeMagmaClient(&global_magma_client);
+    elog(ERROR, "%s", result->message);
+  }
+  return global_magma_client;
+}
+
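+/* Allocate and fill the per-column metadata used when reading from magma. */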
+static void init_magma_format_user_data_for_read(
+    TupleDesc tup_desc, GlobalFormatUserData *user_data) {
+  user_data->numberOfColumns = tup_desc->natts;
+  user_data->colNames = palloc0(sizeof(char *) * user_data->numberOfColumns);
+  user_data->colDatatypes = palloc0(sizeof(int) * user_data->numberOfColumns);
+  user_data->colDatatypeMods = palloc0(
+      sizeof(int64_t) * user_data->numberOfColumns);
+  user_data->colValues = palloc0(sizeof(Datum) * user_data->numberOfColumns);
+  user_data->colRawValues = palloc0(
+      sizeof(char *) * user_data->numberOfColumns);
+  user_data->colValLength = palloc0(
+      sizeof(uint64_t) * user_data->numberOfColumns);
+  user_data->colIsNulls = palloc0(sizeof(bool) * user_data->numberOfColumns);
+
+  for (int i = 0; i < user_data->numberOfColumns; i++) {
+      Form_pg_attribute attr = tup_desc->attrs[i];
+      user_data->colNames[i] = pstrdup(attr->attname.data);
+      user_data->colDatatypes[i] = map_hawq_type_to_magma_type(attr->atttypid, user_data->isMagmatp);
+      user_data->colDatatypeMods[i] = attr->atttypmod;
+      user_data->colRawTid = NULL;
+      user_data->colValues[i] = NULL;
+      user_data->colRawValues[i] = NULL;
+      user_data->colValLength[i] = 0;
+      user_data->colIsNulls[i] = false;
+    }
+}
+
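+/* Allocate and fill the per-column metadata used when writing to magma. */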
+static void init_magma_format_user_data_for_write(
+    TupleDesc tup_desc, GlobalFormatUserData *user_data, Relation relation) {
+  user_data->numberOfColumns = tup_desc->natts;
+  user_data->colNames = palloc0(sizeof(char *) * user_data->numberOfColumns);
+  user_data->colDatatypes = palloc0(sizeof(int) * user_data->numberOfColumns);
+  user_data->colRawValues =
+      palloc0(sizeof(char *) * user_data->numberOfColumns);
+  user_data->colValLength =
+      palloc0(sizeof(uint64_t) * user_data->numberOfColumns);
+  user_data->colDatatypeMods =
+      palloc0(sizeof(int64_t) * user_data->numberOfColumns);
+  user_data->colIsNulls = palloc0(sizeof(bool) * user_data->numberOfColumns);
+  user_data->colTimestamp =
+      palloc0(sizeof(TimestampType) * user_data->numberOfColumns);
+  for (int i = 0; i < user_data->numberOfColumns; ++i) {
+    Form_pg_attribute attr = tup_desc->attrs[i];
+    user_data->colNames[i] = pstrdup(attr->attname.data);
+    user_data->colDatatypes[i] = map_hawq_type_to_magma_type(attr->atttypid, user_data->isMagmatp);
+    user_data->colDatatypeMods[i] = relation->rd_att->attrs[i]->atttypmod;
+    user_data->colIsNulls[i] = !(relation->rd_att->attrs[i]->attnotnull);
+  }
+}
+
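+/* Decide which columns a scan needs to read and record their names/types. */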
+static void build_magma_tuple_descrition_for_read(
+    Plan *plan, Relation relation, GlobalFormatUserData *user_data, bool skipTid) {
+  user_data->colToReads = palloc0(sizeof(bool) * user_data->numberOfColumns);
+
+  for (int i = 0; i < user_data->numberOfColumns; ++i)
+  {
+    user_data->colToReads[i] = plan ? false : true;
+
+    /* 64 is the name type length */
+    user_data->colNames[i] = palloc(sizeof(char) * 64);
+
+    strcpy(user_data->colNames[i],
+        relation->rd_att->attrs[i]->attname.data);
+
+    int data_type = (int) (relation->rd_att->attrs[i]->atttypid);
+    user_data->colDatatypes[i] = map_hawq_type_to_common_plan(data_type);
+    user_data->colDatatypeMods[i] = relation->rd_att->attrs[i]->atttypmod;
+  }
+
+  if (plan)
+  {
+    /* calculate columns to read for seqscan */
+    GetNeededColumnsForScan((Node *) plan->targetlist,
+        user_data->colToReads, user_data->numberOfColumns);
+
+    GetNeededColumnsForScan((Node *) plan->qual, user_data->colToReads,
+        user_data->numberOfColumns);
+
+//    if (skipTid) {
+//      int32_t i = 0;
+//      for (; i < user_data->numberOfColumns; ++i) {
+//        if (user_data->colToReads[i]) break;
+//      }
+//      if (i == user_data->numberOfColumns) user_data->colToReads[0] = true;
+//    }
+  }
+}
+
+static void magma_scan_error_callback(void *arg) {
+  CopyState cstate = (CopyState)arg;
+
+  errcontext("External table %s", cstate->cur_relname);
+}
+
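+/*
+ * Parse a format options string of the form "key 'value' ..." into a list
+ * of DefElem nodes; the "formatter" option, if present, is returned
+ * separately via *fmtname.
+ */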
+static List *magma_parse_format_string(char *fmtstr, char **fmtname) {
+  char *token;
+  const char *whitespace = " \t\n\r";
+  char nonstd_backslash = 0;
+  int encoding = GetDatabaseEncoding();
+
+  token =
+      magma_strtokx2(fmtstr, whitespace, NULL, NULL, 0, false, true, encoding);
+  /* parse user custom options. take it as is. no validation needed */
+
+  List *l = NIL;
+  bool formatter_found = false;
+
+  if (token) {
+    char *key = token;
+    char *val = NULL;
+    StringInfoData key_modified;
+
+    initStringInfo(&key_modified);
+
+    while (key) {
+      /* MPP-14467 - replace meta chars back to original */
+      resetStringInfo(&key_modified);
+      appendStringInfoString(&key_modified, key);
+      replaceStringInfoString(&key_modified, "<gpx20>", " ");
+
+      val = magma_strtokx2(NULL, whitespace, NULL, "'", nonstd_backslash, true,
+                           true, encoding);
+      if (val) {
+        if (pg_strcasecmp(key, "formatter") == 0) {
+          *fmtname = pstrdup(val);
+          formatter_found = true;
+        } else
+          l = lappend(l, makeDefElem(pstrdup(key_modified.data),
+                                     (Node *)makeString(pstrdup(val))));
+      } else
+        goto error;
+
+      key = magma_strtokx2(NULL, whitespace, NULL, NULL, 0, false, false,
+                           encoding);
+    }
+  }
+
+  if (!formatter_found) {
+    /*
+     * If no formatter option is specified, the format name is used instead,
+     * so we do not report an error here.
+     */
+  }
+
+  return l;
+
+error:
+  if (token)
+    ereport(ERROR,
+            (errcode(ERRCODE_GP_INTERNAL_ERROR),
+             errmsg("external table internal parse error at \"%s\"", token)));
+  else
+    ereport(ERROR, (errcode(ERRCODE_GP_INTERNAL_ERROR),
+                    errmsg("external table internal parse error at end of "
+                           "line")));
+  return NIL;
+}
+
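+/*
+ * magma_strtokx2: tokenizer for external table format option strings.
+ * Splits on whitespace and optional delimiter/quote characters, handling
+ * escaped and doubled quotes; returns one token per call, NULL at the end.
+ */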
+static char *magma_strtokx2(const char *s, const char *whitespace,
+                            const char *delim, const char *quote, char escape,
+                            bool e_strings, bool del_quotes, int encoding) {
+  static char *storage = NULL; /* store the local copy of the users string
+                                * here */
+  static char *string = NULL;  /* pointer into storage where to continue on
+                                * next call */
+
+  /* variously abused variables: */
+  unsigned int offset;
+  char *start;
+  char *p;
+
+  if (s) {
+    // pfree(storage);
+
+    /*
+     * We may need extra space to insert delimiter nulls for adjacent
+     * tokens.	2X the space is a gross overestimate, but it's unlikely
+     * that this code will be used on huge strings anyway.
+     */
+    storage = palloc0(2 * strlen(s) + 1);
+    strcpy(storage, s);
+    string = storage;
+  }
+
+  if (!storage) return NULL;
+
+  /* skip leading whitespace */
+  offset = strspn(string, whitespace);
+  start = &string[offset];
+
+  /* end of string reached? */
+  if (*start == '\0') {
+    /* technically we don't need to free here, but we're nice */
+    pfree(storage);
+    storage = NULL;
+    string = NULL;
+    return NULL;
+  }
+
+  /* test if delimiter character */
+  if (delim && strchr(delim, *start)) {
+    /*
+     * If not at end of string, we need to insert a null to terminate the
+     * returned token.	We can just overwrite the next character if it
+     * happens to be in the whitespace set ... otherwise move over the
+     * rest of the string to make room.  (This is why we allocated extra
+     * space above).
+     */
+    p = start + 1;
+    if (*p != '\0') {
+      if (!strchr(whitespace, *p)) memmove(p + 1, p, strlen(p) + 1);
+      *p = '\0';
+      string = p + 1;
+    } else {
+      /* at end of string, so no extra work */
+      string = p;
+    }
+
+    return start;
+  }
+
+  /* check for E string */
+  p = start;
+  if (e_strings && (*p == 'E' || *p == 'e') && p[1] == '\'') {
+    quote = "'";
+    escape = '\\'; /* if std strings before, not any more */
+    p++;
+  }
+
+  /* test if quoting character */
+  if (quote && strchr(quote, *p)) {
+    /* okay, we have a quoted token, now scan for the closer */
+    char thisquote = *p++;
+
+    /* MPP-6698 START
+     * unfortunately, it is possible for an external table format
+     * string to be represented in the catalog in a way which is
+     * problematic to parse: when using a single quote as a QUOTE
+     * or ESCAPE character the format string will show [quote '''].
+     * since we do not want to change how this is stored at this point
+     * (as it will affect previous versions of the software already
+     * in production) the following code block will detect this scenario
+     * where 3 quote characters follow each other, with no fourth one.
+     * in that case, we will skip the second one (the first is skipped
+     * just above) and the last trailing quote will be skipped below.
+     * the result will be the actual token (''') and after stripping
+     * it due to del_quotes we'll end up with (').
+     * very ugly, but will do the job...
+     */
+    char qt = quote[0];
+
+    if (strlen(p) >= 3 && p[0] == qt && p[1] == qt && p[2] != qt) p++;
+    /* MPP-6698 END */
+
+    for (; *p; p += pg_encoding_mblen(encoding, p)) {
+      if (*p == escape && p[1] != '\0')
+        p++; /* process escaped anything */
+      else if (*p == thisquote && p[1] == thisquote)
+        p++; /* process doubled quote */
+      else if (*p == thisquote) {
+        p++; /* skip trailing quote */
+        break;
+      }
+    }
+
+    /*
+     * If not at end of string, we need to insert a null to terminate the
+     * returned token.	See notes above.
+     */
+    if (*p != '\0') {
+      if (!strchr(whitespace, *p)) memmove(p + 1, p, strlen(p) + 1);
+      *p = '\0';
+      string = p + 1;
+    } else {
+      /* at end of string, so no extra work */
+      string = p;
+    }
+
+    /* Clean up the token if caller wants that */
+    if (del_quotes) magma_strip_quotes(start, thisquote, escape, encoding);
+
+    return start;
+  }
+
+  /*
+   * Otherwise no quoting character.	Scan till next whitespace, delimiter
+   * or quote.  NB: at this point, *start is known not to be '\0',
+   * whitespace, delim, or quote, so we will consume at least one character.
+   */
+  offset = strcspn(start, whitespace);
+
+  if (delim) {
+    unsigned int offset2 = strcspn(start, delim);
+
+    if (offset > offset2) offset = offset2;
+  }
+
+  if (quote) {
+    unsigned int offset2 = strcspn(start, quote);
+
+    if (offset > offset2) offset = offset2;
+  }
+
+  p = start + offset;
+
+  /*
+   * If not at end of string, we need to insert a null to terminate the
+   * returned token.	See notes above.
+   */
+  if (*p != '\0') {
+    if (!strchr(whitespace, *p)) memmove(p + 1, p, strlen(p) + 1);
+    *p = '\0';
+    string = p + 1;
+  } else {
+    /* at end of string, so no extra work */
+    string = p;
+  }
+
+  return start;
+}
+
+static void magma_strip_quotes(char *source, char quote, char escape,
+                               int encoding) {
+  char *src;
+  char *dst;
+
+  Assert(source);
+  Assert(quote);
+
+  src = dst = source;
+
+  if (*src && *src == quote) src++; /* skip leading quote */
+
+  while (*src) {
+    char c = *src;
+    int i;
+
+    if (c == quote && src[1] == '\0')
+      break; /* skip trailing quote */
+    else if (c == quote && src[1] == quote)
+      src++; /* process doubled quote */
+    else if (c == escape && src[1] != '\0')
+      src++; /* process escaped character */
+
+    i = pg_encoding_mblen(encoding, src);
+    while (i--) *dst++ = *src++;
+  }
+
+  *dst = '\0';
+}
+
+static void magma_check_result(MagmaClientC **client) {
+  Assert(client != NULL && *client != NULL);
+
+  MagmaResult *result = MagmaClientC_GetResult(*client);
+  Assert(result != NULL);
+
+  switch (result->level) {
+    case 0:  // DEBUG
+      elog(DEBUG3, "%s", result->message);
+      break;
+
+    case 1:  // LOG
+      elog(LOG, "%s", result->message);
+      break;
+
+    case 2:  // INFO
+      elog(INFO, "%s", result->message);
+      break;
+
+    case 3:  // NOTICE
+      elog(NOTICE, "%s", result->message);
+      break;
+
+    case 4:  // WARNING
+      elog(WARNING, "%s", result->message);
+      break;
+
+    case 5:  // ERROR
+      elog(ERROR, "%s", result->message);
+      break;
+
+    default:
+      elog(ERROR, "invalid error level %d", result->level);
+      break;
+  }
+}
+
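+/* Return true if the given HAWQ type is not supported by the magma format. */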
+bool checkUnsupportedDataTypeMagma(int32_t hawqTypeID) {
+  switch (hawqTypeID) {
+    case HAWQ_TYPE_BOOL:
+    case HAWQ_TYPE_INT2:
+    case HAWQ_TYPE_INT4:
+    case HAWQ_TYPE_INT8:
+    case HAWQ_TYPE_FLOAT4:
+    case HAWQ_TYPE_FLOAT8:
+    case HAWQ_TYPE_CHAR:
+    case HAWQ_TYPE_TEXT:
+    case HAWQ_TYPE_BYTE:
+    case HAWQ_TYPE_BPCHAR:
+    case HAWQ_TYPE_VARCHAR:
+    case HAWQ_TYPE_DATE:
+    case HAWQ_TYPE_TIME:
+    case HAWQ_TYPE_TIMESTAMP:
+    case HAWQ_TYPE_INT2_ARRAY:
+    case HAWQ_TYPE_INT4_ARRAY:
+    case HAWQ_TYPE_INT8_ARRAY:
+    case HAWQ_TYPE_FLOAT4_ARRAY:
+    case HAWQ_TYPE_FLOAT8_ARRAY:
+    case HAWQ_TYPE_NUMERIC:
+    case HAWQ_TYPE_JSON:
+    case HAWQ_TYPE_JSONB:
+      return false;
+    default:
+      return true;
+  }
+}
+
+/*
+static int rangeCmp(const void *p1, const void *p2) {
+  MagmaRange *r1 = (MagmaRange *)p1;
+  MagmaRange *r2 = (MagmaRange *)p2;
+
+  // replicaGroupId first
+  if (r1->replicaGroups[0].id < r2->replicaGroups[0].id) {
+    return -1;
+  }
+
+  if (r1->replicaGroups[0].id > r2->replicaGroups[0].id) {
+    return 1;
+  }
+
+  // inner Group second by the first three bits in rangeid
+  if (r1->groupId < r2->groupId) {
+    return -1;
+  }
+
+  if (r1->groupId > r2->groupId) {
+    return 1;
+  }
+
+  return 0;
+}
+*/
+
+int32_t map_hawq_type_to_magma_type(int32_t hawqTypeID, bool isMagmatp) {
+  switch (hawqTypeID) {
+    case HAWQ_TYPE_BOOL:
+      return BOOLEANID;
+
+    case HAWQ_TYPE_CHAR:
+      return TINYINTID;
+
+    case HAWQ_TYPE_INT2:
+      return SMALLINTID;
+
+    case HAWQ_TYPE_INT4:
+      return INTID;
+
+    case HAWQ_TYPE_INT8:
+    case HAWQ_TYPE_TID:
+      return BIGINTID;
+
+    case HAWQ_TYPE_FLOAT4:
+      return FLOATID;
+
+    case HAWQ_TYPE_FLOAT8:
+      return DOUBLEID;
+
+    case HAWQ_TYPE_NUMERIC:
+      return DECIMALNEWID;
+
+    case HAWQ_TYPE_DATE:
+      return DATEID;
+
+    case HAWQ_TYPE_BPCHAR:
+      return CHARID;
+
+    case HAWQ_TYPE_VARCHAR:
+      return VARCHARID;
+
+    case HAWQ_TYPE_NAME:
+    case HAWQ_TYPE_TEXT:
+      return STRINGID;
+
+    case HAWQ_TYPE_JSON:
+      return JSONID;
+
+    case HAWQ_TYPE_JSONB:
+      return JSONBID;
+
+    case HAWQ_TYPE_TIME:
+      return TIMEID;
+
+    case HAWQ_TYPE_TIMESTAMPTZ:
+    case HAWQ_TYPE_TIMESTAMP:
+    case HAWQ_TYPE_TIMETZ:
+      return TIMESTAMPID;
+
+    case HAWQ_TYPE_INTERVAL:
+      return INTERVALID;
+
+    case HAWQ_TYPE_MONEY:
+    case HAWQ_TYPE_BIT:
+    case HAWQ_TYPE_VARBIT:
+    case HAWQ_TYPE_BYTE:
+    case HAWQ_TYPE_XML:
+    case HAWQ_TYPE_MACADDR:
+    case HAWQ_TYPE_INET:
+    case HAWQ_TYPE_CIDR:
+      return BINARYID;
+
+    case HAWQ_TYPE_INT2_ARRAY:
+      return SMALLINTARRAYID;
+
+    case HAWQ_TYPE_INT4_ARRAY:
+      return INTARRAYID;
+
+    case HAWQ_TYPE_INT8_ARRAY:
+      return BIGINTARRAYID;
+
+    case HAWQ_TYPE_FLOAT4_ARRAY:
+      return FLOATARRAYID;
+
+    case HAWQ_TYPE_FLOAT8_ARRAY:
+      return DOUBLEARRAYID;
+
+    case HAWQ_TYPE_TEXT_ARRAY:
+      return STRINGARRAYID;
+
+    case HAWQ_TYPE_BPCHAR_ARRAY:
+      return BPCHARARRAYID;
+
+    case HAWQ_TYPE_POINT:
+    case HAWQ_TYPE_LSEG:
+    case HAWQ_TYPE_PATH:
+    case HAWQ_TYPE_BOX:
+    case HAWQ_TYPE_POLYGON:
+    case HAWQ_TYPE_CIRCLE:
+    default:
+      return type_is_rowtype(hawqTypeID)
+                 ? (STRUCTEXID)
+                 : (type_is_basetype(hawqTypeID) ? IOBASETYPEID
+                                                 : INVALIDTYPEID);
+  }
+}
diff --git a/contrib/magma/magma_install.sql b/contrib/magma/magma_install.sql
new file mode 100644
index 0000000..6229261
--- /dev/null
+++ b/contrib/magma/magma_install.sql
@@ -0,0 +1,217 @@
+------------------------------------------------------------------
+-- magma protocol
+------------------------------------------------------------------
+CREATE OR REPLACE FUNCTION pg_catalog.magma_validate() RETURNS void
+AS '$libdir/magma.so', 'magma_protocol_validate'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_blocklocation() RETURNS void
+AS '$libdir/magma.so', 'magma_protocol_blocklocation'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_tablesize() RETURNS void
+AS '$libdir/magma.so', 'magma_protocol_tablesize'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_databasesize() RETURNS void
+AS '$libdir/magma.so', 'magma_protocol_databasesize'
+LANGUAGE C STABLE;
+
+------------------------------------------------------------------
+-- magma format
+------------------------------------------------------------------
+CREATE OR REPLACE FUNCTION pg_catalog.magma_validate_interfaces() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_interfaces'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_validate_interfaces() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_interfaces'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_validate_interfaces() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_interfaces'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_validate_options() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_options'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_validate_options() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_options'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_validate_options() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_options'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_validate_encodings() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_encodings'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_validate_encodings() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_encodings'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_validate_encodings() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_encodings'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_validate_datatypes() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_datatypes'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_validate_datatypes() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_datatypes'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_validate_datatypes() RETURNS void
+AS '$libdir/magma.so', 'magma_validate_datatypes'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_createtable() RETURNS void
+AS '$libdir/magma.so', 'magma_createtable'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_droptable() RETURNS void
+AS '$libdir/magma.so', 'magma_droptable'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_createindex() RETURNS void
+AS '$libdir/magma.so', 'magma_createindex'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_dropindex() RETURNS void
+AS '$libdir/magma.so', 'magma_dropindex'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_reindex_index() RETURNS void
+AS '$libdir/magma.so', 'magma_reindex_index'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_beginscan() RETURNS bytea
+AS '$libdir/magma.so', 'magma_beginscan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_getnext_init() RETURNS bytea
+AS '$libdir/magma.so', 'magma_getnext_init'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_getnext() RETURNS bytea
+AS '$libdir/magma.so', 'magma_getnext'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_rescan() RETURNS void
+AS '$libdir/magma.so', 'magma_rescan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_endscan() RETURNS void
+AS '$libdir/magma.so', 'magma_endscan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_stopscan() RETURNS void
+AS '$libdir/magma.so', 'magma_stopscan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_begindelete() RETURNS bytea
+AS '$libdir/magma.so', 'magma_begindelete'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_delete() RETURNS void
+AS '$libdir/magma.so', 'magma_delete'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_enddelete() RETURNS void
+AS '$libdir/magma.so', 'magma_enddelete'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_beginupdate() RETURNS bytea
+AS '$libdir/magma.so', 'magma_beginupdate'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_update() RETURNS void
+AS '$libdir/magma.so', 'magma_update'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_endupdate() RETURNS void
+AS '$libdir/magma.so', 'magma_endupdate'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_insert_init() RETURNS bytea
+AS '$libdir/magma.so', 'magma_insert_init'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_insert() RETURNS bytea
+AS '$libdir/magma.so', 'magma_insert'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmatp_insert_finish() RETURNS void
+AS '$libdir/magma.so', 'magma_insert_finish'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_transaction() RETURNS void
+AS '$libdir/magma.so', 'magma_transaction'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_beginscan() RETURNS bytea
+AS '$libdir/magma.so', 'magma_beginscan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_getnext_init() RETURNS bytea
+AS '$libdir/magma.so', 'magma_getnext_init'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_getnext() RETURNS bytea
+AS '$libdir/magma.so', 'magma_getnext'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_rescan() RETURNS void
+AS '$libdir/magma.so', 'magma_rescan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_endscan() RETURNS void
+AS '$libdir/magma.so', 'magma_endscan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_stopscan() RETURNS void
+AS '$libdir/magma.so', 'magma_stopscan'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_begindelete() RETURNS bytea
+AS '$libdir/magma.so', 'magma_begindelete'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_delete() RETURNS void
+AS '$libdir/magma.so', 'magma_delete'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_enddelete() RETURNS void
+AS '$libdir/magma.so', 'magma_enddelete'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_beginupdate() RETURNS bytea
+AS '$libdir/magma.so', 'magma_beginupdate'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_update() RETURNS void
+AS '$libdir/magma.so', 'magma_update'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_endupdate() RETURNS void
+AS '$libdir/magma.so', 'magma_endupdate'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_insert_init() RETURNS bytea
+AS '$libdir/magma.so', 'magma_insert_init'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_insert() RETURNS bytea
+AS '$libdir/magma.so', 'magma_insert'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magmaap_insert_finish() RETURNS void
+AS '$libdir/magma.so', 'magma_insert_finish'
+LANGUAGE C STABLE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_getstatus() RETURNS void
+AS '$libdir/magma.so', 'magma_getstatus'
+LANGUAGE C STABLE;
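As a quick sanity check (illustrative, not part of this script), the entry points registered above can be listed from the system catalog after installation; for C-language functions, prosrc holds the exported symbol name and probin the library path:

  -- Hypothetical verification query; assumes the install script above has run.
  SELECT proname, prosrc
  FROM pg_proc
  WHERE probin = '$libdir/magma.so'
  ORDER BY proname;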
diff --git a/contrib/magma/monitor_install.sql b/contrib/magma/monitor_install.sql
new file mode 100644
index 0000000..7950fd5
--- /dev/null
+++ b/contrib/magma/monitor_install.sql
@@ -0,0 +1,77 @@
+------------------------------------------------------------------
+-- hawq status
+------------------------------------------------------------------
+CREATE FUNCTION hawq_magma_status() RETURNS SETOF record LANGUAGE internal VOLATILE STRICT AS 'hawq_magma_status' WITH (OID=5085, DESCRIPTION="Return magma node information");
+
+CREATE OR REPLACE FUNCTION pg_catalog.magma_getstatus() RETURNS void
+AS '$libdir/magma.so', 'magma_getstatus'
+LANGUAGE C STABLE;
+
+CREATE VIEW pg_catalog.hawq_magma_status AS
+    SELECT * FROM hawq_magma_status() AS s
+    (node text,
+     compactJobRunning text,
+     compactJob text,
+     compactActionJobRunning text,
+     compactActionJob text,
+     dirs text,
+     description text);
+
+------------------------------------------------------------------
+-- history table
+------------------------------------------------------------------
+CREATE READABLE EXTERNAL WEB TABLE hawq_toolkit.__hawq_pg_stat_activity_history
+(
+    datid                int,
+    datname              text,
+    usesysid             int,
+    username             text,
+    procpid              int,
+    sess_id              int,
+    query_start          text,
+    query_end            text,
+    client_addr          text,
+    client_port          int,
+    application_name     text,
+    cpu                  text,
+    memory               text,
+    status               text,
+    errinfo              text,
+    query                text
+)
+EXECUTE E'cat $GP_SEG_DATADIR/pg_log/*.history' ON MASTER
+FORMAT 'CSV' (DELIMITER ',' NULL '' QUOTE '"');
+
+REVOKE ALL ON TABLE hawq_toolkit.__hawq_pg_stat_activity_history FROM public;
+
+------------------------------------------------------------------
+-- schema/db privileges
+------------------------------------------------------------------
+
+CREATE VIEW information_schema.schema_privileges as
+SELECT nspname AS schema_name,
+coalesce(nullif(role.name,''), 'PUBLIC') AS grantee,
+substring(
+CASE WHEN position('U' in split_part(split_part((','||array_to_string(nspacl,',')), ','||role.name||'=',2 ) ,'/',1)) > 0 THEN ',USAGE' ELSE '' END
+|| CASE WHEN position('C' in split_part(split_part((','||array_to_string(nspacl,',')), ','||role.name||'=',2 ) ,'/',1)) > 0 THEN ',CREATE' ELSE '' END
+, 2,10000) AS privilege_type
+FROM pg_namespace pn, (SELECT pg_roles.rolname AS name
+FROM pg_roles UNION ALL SELECT '' AS name) AS role
+WHERE (','||array_to_string(nspacl,',')) LIKE '%,'||role.name||'=%'
+AND nspowner > 1;
+
+GRANT SELECT ON information_schema.schema_privileges TO PUBLIC;
+
+CREATE VIEW information_schema.database_privileges as
+SELECT datname AS database_name,
+coalesce(nullif(role.name,''), 'PUBLIC') AS grantee,
+substring(
+CASE WHEN position('C' in split_part(split_part((','||array_to_string(datacl,',')), ','||role.name||'=',2 ) ,'/',1)) > 0 THEN ',CREATE' ELSE '' END
+|| CASE WHEN position('T' in split_part(split_part((','||array_to_string(datacl,',')), ','||role.name||'=',2 ) ,'/',1)) > 0 THEN ',TEMPORARY' ELSE '' END
+|| CASE WHEN position('c' in split_part(split_part((','||array_to_string(datacl,',')), ','||role.name||'=',2 ) ,'/',1)) > 0 THEN ',CONNECT' ELSE '' END
+, 2,10000) AS privilege_type
+FROM pg_database pd, (SELECT pg_roles.rolname AS name
+FROM pg_roles UNION ALL SELECT '' AS name) AS role
+WHERE (','||array_to_string(datacl,',')) LIKE '%,'||role.name||'=%';
+
+GRANT SELECT ON information_schema.database_privileges TO PUBLIC;
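A brief usage sketch (illustrative only) of the monitoring objects defined above; the column names follow the view definitions in this script:

  -- Cluster-level magma node status, via pg_catalog.hawq_magma_status.
  SELECT node, compactjobrunning, dirs, description
  FROM pg_catalog.hawq_magma_status;

  -- Per-schema and per-database grants, via the new information_schema views.
  SELECT schema_name, grantee, privilege_type
  FROM information_schema.schema_privileges
  ORDER BY schema_name, grantee;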
diff --git a/contrib/orc/Makefile b/contrib/orc/Makefile
index 540da15..4459b20 100644
--- a/contrib/orc/Makefile
+++ b/contrib/orc/Makefile
@@ -1,21 +1,3 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
 MODULE_big = orc
 OBJS       = orc.o
 
@@ -27,5 +9,7 @@ subdir = contrib/orc
 top_builddir = ../..
 include $(top_builddir)/src/Makefile.global
 include $(top_srcdir)/contrib/contrib-global.mk
-override CFLAGS += -lstorage -ljson-c -luuid -I${top_builddir}/src/backend/utils -L$(prefix)/lib -I$(prefix)/include
+
+override CFLAGS += -ljson-c -luuid
+
 endif
diff --git a/contrib/orc/orc.c b/contrib/orc/orc.c
index 36018cf..092abcd 100644
--- a/contrib/orc/orc.c
+++ b/contrib/orc/orc.c
@@ -1,22 +1,3 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
 #include <json-c/json.h>
 
 #include "c.h"
@@ -45,14 +26,17 @@
 #include "postmaster/identity.h"
 #include "nodes/makefuncs.h"
 #include "nodes/plannodes.h"
+#include "parser/parse_type.h"
 #include "utils/uri.h"
+#include "utils/numeric.h"
 #include "cdb/cdbfilesystemcredential.h"
+#include "optimizer/newPlanner.h"
 
 #include "storage/cwrapper/orc-format-c.h"
 #include "storage/cwrapper/hdfs-file-system-c.h"
 #include "cdb/cdbvars.h"
 
-#define ORC_TIMESTAMP_EPOCH_JDATE 2457024 /* == date2j(2015, 1, 1) */
+#define ORC_TIMESTAMP_EPOCH_JDATE	2457024 /* == date2j(2015, 1, 1) */
 #define MAX_ORC_ARRAY_DIMS        10000
 #define ORC_NUMERIC_MAX_PRECISION 38
 
@@ -96,56 +80,58 @@ Datum orc_insert_finish(PG_FUNCTION_ARGS);
 
 typedef struct
 {
-  int64_t second;
-  int64_t nanosecond;
+	int64_t second;
+	int64_t nanosecond;
 } TimestampType;
 
 typedef struct ORCFormatUserData
 {
-  ORCFormatC *fmt;
-  char **colNames;
-  int *colDatatypes;
-  int64_t *colDatatypeMods;
-  int32_t numberOfColumns;
-  char **colRawValues;
-  Datum *colValues;
-  uint64_t *colValLength;
-  bits8 **colValNullBitmap;
-  int **colValDims;
-  char **colAddresses;
-  bool *colToReads;
-
-  int nSplits;
-  ORCFormatFileSplit *splits;
-
-  // for write only
-  TimestampType *colTimestamp;
+	ORCFormatC *fmt;
+	char **colNames;
+	int *colDatatypes;
+	int64_t *colDatatypeMods;
+	int32_t numberOfColumns;
+	char **colRawValues;
+	Datum *colValues;
+	uint64_t *colValLength;
+	bits8 **colValNullBitmap;
+	int **colValDims;
+	char **colAddresses;
+	bool *colToReads;
+
+	CommonPlanContext ctx;
+
+	int nSplits;
+	ORCFormatFileSplit *splits;
+
+	// for write only
+	TimestampType *colTimestamp;
 } ORCFormatUserData;
 
 static FmgrInfo *get_orc_function(char *formatter_name, char *function_name);
 static void get_scan_functions(FileScanDesc file_scan_desc);
 static void get_insert_functions(ExternalInsertDesc ext_insert_desc);
 static void init_format_user_data_for_read(TupleDesc tup_desc,
-    ORCFormatUserData *user_data);
+		ORCFormatUserData *user_data);
 static void init_format_user_data_for_write(TupleDesc tup_desc,
-    ORCFormatUserData *user_data);
+		ORCFormatUserData *user_data);
 static void build_options_in_json(List *fmt_opts_defelem, int encoding,
-    char **json_str, TupleDesc tupDesc);
+		char **json_str, TupleDesc tupDesc);
 static ORCFormatC *create_formatter_instance(List *fmt_opts_defelem,
-    int encoding, int segno, TupleDesc tupDesc);
+		int encoding, int segno, TupleDesc tupDesc);
 static void build_file_splits(Uri *uri, ScanState *scan_state,
-    ORCFormatUserData *user_data);
+		ORCFormatUserData *user_data);
 static void build_tuple_descrition_for_read(Plan *plan, Relation relation,
-    ORCFormatUserData *user_data);
+		ORCFormatUserData *user_data);
 static void build_tuple_descrition_for_write(Relation relation,
-    ORCFormatUserData *user_data);
+		ORCFormatUserData *user_data);
 static void orc_scan_error_callback(void *arg);
 static void orc_parse_format_string(CopyState pstate, char *fmtstr);
 static char *orc_strtokx2(const char *s, const char *whitespace,
-    const char *delim, const char *quote, char escape, bool e_strings,
-    bool del_quotes, int encoding);
+		const char *delim, const char *quote, char escape, bool e_strings,
+		bool del_quotes, int encoding);
 static void orc_strip_quotes(char *source, char quote, char escape,
-    int encoding);
+		int encoding);
 
 /* Implementation of validators for pluggable storage format ORC */
 
@@ -155,16 +141,16 @@ static void orc_strip_quotes(char *source, char quote, char escape,
  */
 Datum orc_validate_interfaces(PG_FUNCTION_ARGS)
 {
-  PlugStorageValidator psv_interface =
-      (PlugStorageValidator) (fcinfo->context);
+	PlugStorageValidator psv_interface =
+			(PlugStorageValidator) (fcinfo->context);
 
-  if (pg_strncasecmp(psv_interface->format_name, "orc", strlen("orc")) != 0)
-  {
-    ereport(ERROR,
-        (errcode(ERRCODE_SYNTAX_ERROR), errmsg("ORC: incorrect format name \'%s\'", psv_interface->format_name)));
-  }
+	if (pg_strncasecmp(psv_interface->format_name, "orc", strlen("orc")) != 0)
+	{
+		ereport(ERROR,
+				(errcode(ERRCODE_SYNTAX_ERROR), errmsg("ORC: incorrect format name \'%s\'", psv_interface->format_name)));
+	}
 
-  PG_RETURN_VOID() ;
+	PG_RETURN_VOID() ;
 }
 
 /*
@@ -175,140 +161,140 @@ Datum orc_validate_interfaces(PG_FUNCTION_ARGS)
  */
 Datum orc_validate_options(PG_FUNCTION_ARGS)
 {
-  PlugStorageValidator psv = (PlugStorageValidator) (fcinfo->context);
-
-  List *format_opts = psv->format_opts;
-  char *format_str = psv->format_str;
-  bool is_writable = psv->is_writable;
-  TupleDesc tup_desc = psv->tuple_desc;
-
-  char *formatter = NULL;
-  char *compresstype = NULL;
-  char *bloomfilter = NULL;
-  char *dicthreshold = NULL;
-  char *bucketnum = NULL;
-  char *category = NULL;
-
-  ListCell *opt;
-
-  const int maxlen = 8 * 1024 - 1;
-  int len = 0;
-
-  foreach(opt, format_opts)
-  {
-    DefElem *defel = (DefElem *) lfirst(opt);
-    char *key = defel->defname;
-    bool need_free_value = false;
-    char *val = (char *) defGetString(defel, &need_free_value);
-
-    /* check formatter */
-    if (strncasecmp(key, "formatter", strlen("formatter")) == 0)
-    {
-      char *formatter_values[] =
-      { "orc" };
-      checkPlugStorageFormatOption(&formatter, key, val,
-      true, 1, formatter_values);
-    }
-
-    /* check option for orc format */
-    if (strncasecmp(key, "compresstype", strlen("compresstype")) == 0)
-    {
-      char *compresstype_values[] =
-      { "none", "snappy", "lz4" };
-      checkPlugStorageFormatOption(&compresstype, key, val, is_writable,
-          3, compresstype_values);
-    }
-
-    if (strncasecmp(key, "bloomfilter", strlen("bloomfilter")) == 0)
-    {
-      int attnum = tup_desc->natts;
-      char **attribute_names = palloc0(attnum * sizeof(char*));
-      for (int i = 0; i < attnum; ++i) {
-        int name_len = strlen(((Form_pg_attribute) (tup_desc->attrs[i]))->attname.data);
-        char *attribute = palloc0(name_len + 1);
-        strncpy(attribute, ((Form_pg_attribute) (tup_desc->attrs[i]))->attname.data, name_len);
-        attribute_names[i] = attribute;
-      }
-      char *dup_val = pstrdup(val);
-      char *token = strtok(dup_val, ",");
-      while (token) {
-        checkPlugStorageFormatOption(&bloomfilter, key, token, true, attnum, attribute_names);
-        bloomfilter = NULL;
-        token = strtok(NULL, ",");
-      }
-    }
-
-    if (strncasecmp(key, "dicthreshold", strlen("dicthreshold")) == 0)
-    {
-      checkPlugStorageFormatOption(&dicthreshold, key, val,
-      true, 0, NULL);
-      char *end;
-      double threshold = strtod(val, &end);
-      if (end == val || *end != '\0' || threshold < 0 || threshold > 1)
-      {
-        ereport(ERROR,
-            (errcode(ERRCODE_SYNTAX_ERROR), errmsg("dicthreshold \"%s\" must be within [0-1]", val), errOmitLocation(true)));
-      }
-    }
-
-    if (strncasecmp(key, "bucketnum", strlen("bucketnum")) == 0)
-    {
-      checkPlugStorageFormatOption(&bucketnum, key, val,
-      true, 0, NULL);
-      char *end;
-      long bucketnumber = strtol(val, &end, 10);
-      if (end == val || *end != '\0' || bucketnumber <= 0)
-      {
-        ereport(ERROR,
-            (errcode(ERRCODE_SYNTAX_ERROR), errmsg("bucketnum \"%s\" must be > 0", val), errOmitLocation(true)));
-      }
-    }
-
-    /* check category orc format */
-    if (strncasecmp(key, "category", strlen("category")) == 0)
-    {
-      char *category_values[] =
-      { "internal", "external" };
-      checkPlugStorageFormatOption(&category, key, val,
-      true, 2, category_values);
-    }
-
-    if (strncasecmp(key, "formatter", strlen("formatter"))
-        && strncasecmp(key, "compresstype", strlen("compresstype"))
-        && strncasecmp(key, "bloomfilter", strlen("bloomfilter"))
-        && strncasecmp(key, "dicthreshold", strlen("dicthreshold"))
-        && strncasecmp(key, "bucketnum", strlen("bucketnum"))
-        && strncasecmp(key, "category", strlen("category")))
-    {
-      ereport(ERROR,
-          (errcode(ERRCODE_SYNTAX_ERROR), errmsg("Option \"%s\" for ORC table is invalid", key), errOmitLocation(true)));
-    }
-
-    sprintf((char * ) format_str + len, "%s '%s' ", key, val);
-    len += strlen(key) + strlen(val) + 4;
-
-    if (need_free_value)
-    {
-      pfree(val);
-      val = NULL;
-    }
-
-    AssertImply(need_free_value, NULL == val);
-
-    if (len > maxlen)
-    {
-      ereport(ERROR,
-          (errcode(ERRCODE_SYNTAX_ERROR), errmsg("format options must be less than %d bytes in size", maxlen), errOmitLocation(true)));
-    }
-  }
-
-  if (!formatter)
-  {
-    ereport(ERROR,
-        (errcode(ERRCODE_SYNTAX_ERROR), errmsg("no formatter function specified"), errOmitLocation(true)));
-  }
-
-  PG_RETURN_VOID() ;
+	PlugStorageValidator psv = (PlugStorageValidator) (fcinfo->context);
+
+	List *format_opts = psv->format_opts;
+	char *format_str = psv->format_str;
+	bool is_writable = psv->is_writable;
+	TupleDesc tup_desc = psv->tuple_desc;
+
+	char *formatter = NULL;
+	char *compresstype = NULL;
+	char *bloomfilter = NULL;
+	char *dicthreshold = NULL;
+	char *bucketnum = NULL;
+	char *category = NULL;
+
+	ListCell *opt;
+
+	const int maxlen = 8 * 1024 - 1;
+	int len = 0;
+
+	foreach(opt, format_opts)
+	{
+		DefElem *defel = (DefElem *) lfirst(opt);
+		char *key = defel->defname;
+		bool need_free_value = false;
+		char *val = (char *) defGetString(defel, &need_free_value);
+
+		/* check formatter */
+		if (strncasecmp(key, "formatter", strlen("formatter")) == 0)
+		{
+			char *formatter_values[] =
+			{ "orc" };
+			checkPlugStorageFormatOption(&formatter, key, val,
+			true, 1, formatter_values);
+		}
+
+		/* check option for orc format */
+		if (strncasecmp(key, "compresstype", strlen("compresstype")) == 0)
+		{
+			char *compresstype_values[] =
+			{ "none", "snappy", "lz4" };
+			checkPlugStorageFormatOption(&compresstype, key, val, is_writable,
+					3, compresstype_values);
+		}
+
+		if (strncasecmp(key, "bloomfilter", strlen("bloomfilter")) == 0)
+		{
+		  int attnum = tup_desc->natts;
+		  char **attribute_names = palloc0(attnum * sizeof(char*));
+		  for (int i = 0; i < attnum; ++i) {
+		    int name_len = strlen(((Form_pg_attribute) (tup_desc->attrs[i]))->attname.data);
+		    char *attribute = palloc0(name_len + 1);
+		    strncpy(attribute, ((Form_pg_attribute) (tup_desc->attrs[i]))->attname.data, name_len);
+		    attribute_names[i] = attribute;
+		  }
+		  char *dup_val = pstrdup(val);
+		  char *token = strtok(dup_val, ",");
+		  while (token) {
+		    checkPlugStorageFormatOption(&bloomfilter, key, token, true, attnum, attribute_names);
+		    bloomfilter = NULL;
+		    token = strtok(NULL, ",");
+		  }
+		}
+
+		if (strncasecmp(key, "dicthreshold", strlen("dicthreshold")) == 0)
+		{
+			checkPlugStorageFormatOption(&dicthreshold, key, val,
+			true, 0, NULL);
+			char *end;
+			double threshold = strtod(val, &end);
+			if (end == val || *end != '\0' || threshold < 0 || threshold > 1)
+			{
+				ereport(ERROR,
+						(errcode(ERRCODE_SYNTAX_ERROR), errmsg("dicthreshold \"%s\" must be within [0-1]", val), errOmitLocation(true)));
+			}
+		}
+
+		if (strncasecmp(key, "bucketnum", strlen("bucketnum")) == 0)
+		{
+			checkPlugStorageFormatOption(&bucketnum, key, val,
+			true, 0, NULL);
+			char *end;
+			long bucketnumber = strtol(val, &end, 10);
+			if (end == val || *end != '\0' || bucketnumber <= 0)
+			{
... 299680 lines suppressed ...