Posted to commits@hawq.apache.org by zt...@apache.org on 2022/03/16 06:37:14 UTC

[hawq] branch ztao updated (1aa69cf -> c79ab32)

This is an automated email from the ASF dual-hosted git repository.

ztao1987 pushed a change to branch ztao
in repository https://gitbox.apache.org/repos/asf/hawq.git.


    from 1aa69cf  HAWQ-1832. fix ORC bloom filter option
     new c3e9193  HAWQ-1811. Sync with OushuDB - Phase IV
     new c79ab32  HAWQ-1834. add options for native orc table creation

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 CMakeLists.txt                                 |     3 +-
 contrib/hornet/hornet.c                        |   131 +
 contrib/hornet/load_hornet_helper_function.sql |    24 +-
 contrib/hornet/newcdbhash.c                    |   177 +
 contrib/hornet/oldcdbhash.c                    |   412 +
 contrib/magma/magma.c                          |   258 +-
 contrib/orc/orc.c                              |     6 +-
 src/all_src_files.txt                          |     8 +
 src/backend/access/appendonly/aomd.c           |    55 +
 src/backend/access/bitmap/bitmapinsert.c       |     3 +-
 src/backend/access/bitmap/bitmappages.c        |     2 +-
 src/backend/access/bitmap/bitmapsearch.c       |     4 +-
 src/backend/access/common/reloptions.c         |   278 +-
 src/backend/access/common/scankey.c            |    15 +-
 src/backend/access/external/plugstorage.c      |    54 +-
 src/backend/access/index/catquery.c            |    12 +-
 src/backend/access/index/gperf.init            |    10 +-
 src/backend/access/nbtree/nbtsearch.c          |     4 +-
 src/backend/access/orc/orcam.c                 |   440 +-
 src/backend/access/orc/orcsegfiles.c           |    34 +-
 src/backend/bootstrap/bootparse.y              |    38 +
 src/backend/catalog/Makefile                   |     2 +
 src/backend/catalog/aclchk.c                   |    83 +-
 src/backend/catalog/catalog.c                  |    24 +-
 src/backend/catalog/core/catcoregen.py         |     8 +
 src/backend/catalog/dependency.c               |    50 +
 src/backend/catalog/index.c                    |     3 +-
 src/backend/catalog/namespace.c                |    71 +
 src/backend/catalog/skylon_elabel.c            |    62 +
 src/backend/catalog/skylon_elabel_attribute.c  |    66 +
 src/backend/catalog/skylon_graph.c             |    60 +
 src/backend/catalog/skylon_graph_elabel.c      |    59 +
 src/backend/catalog/skylon_graph_vlabel.c      |    59 +
 src/backend/catalog/skylon_index.c             |    73 +
 src/backend/catalog/skylon_vlabel.c            |    60 +
 src/backend/catalog/skylon_vlabel_attribute.c  |    64 +
 src/backend/catalog/system_views.sql           |    29 +
 src/backend/cdb/Makefile                       |     1 -
 src/backend/cdb/cdbdatalocality.c              |   187 +-
 src/backend/cdb/cdbexplain.c                   |   165 +-
 src/backend/cdb/cdbplan.c                      |     8 +
 src/backend/cdb/dispatcher.c                   |     1 -
 src/backend/cdb/dispatcher_new.c               |    10 +-
 src/backend/commands/copy.c                    |     4 +
 src/backend/commands/explain.c                 |     9 -
 src/backend/commands/indexcmds.c               |    80 +-
 src/backend/commands/tablecmds.c               |  1428 ++++
 src/backend/commands/vacuum.c                  |    20 +
 src/backend/executor/execIndexscan.c           |    34 +-
 src/backend/executor/execMain.c                |    34 +-
 src/backend/executor/execUtils.c               |    13 +-
 src/backend/executor/functions.c               |     4 +-
 src/backend/executor/newExecutor.c             |    12 +-
 src/backend/executor/nodeExternalscan.c        |   104 +-
 src/backend/executor/nodeIndexscan.c           |    50 +-
 src/backend/executor/nodeNestloop.c            |    38 +-
 src/backend/executor/nodeSubplan.c             |    68 +-
 src/backend/executor/spi.c                     |     5 +-
 src/backend/nodes/copyfuncs.c                  |    35 +-
 src/backend/nodes/equalfuncs.c                 |     9 +
 src/backend/nodes/outfast.c                    |    30 +
 src/backend/nodes/outfuncs.c                   |     9 +
 src/backend/nodes/readfast.c                   |    33 +-
 src/backend/nodes/readfuncs.c                  |    12 +-
 src/backend/optimizer/path/indxpath.c          |   108 +-
 src/backend/optimizer/plan/createplan.c        |    33 +-
 src/backend/optimizer/plan/newPlanner.c        |    70 +-
 src/backend/optimizer/plan/planner.c           |     2 +-
 src/backend/optimizer/plan/setrefs.c           |    26 +
 src/backend/parser/analyze.c                   |   451 +-
 src/backend/parser/gram.y                      |   262 +-
 src/backend/parser/parse_clause.c              |   183 +-
 src/backend/parser/parse_expr.c                |    49 +-
 src/backend/parser/parse_relation.c            |    19 +-
 src/backend/tcop/postgres.c                    |    22 +-
 src/backend/tcop/utility.c                     |    56 +
 src/backend/utils/cache/lsyscache.c            |    52 +-
 src/backend/utils/cache/relcache.c             |    37 +
 src/backend/utils/init/globals.c               |     2 -
 src/backend/utils/misc/guc.c                   |    25 +-
 src/backend/utils/mmgr/Makefile                |     2 +
 src/bin/psql/tab-complete.c                    |    87 +-
 src/include/access/aomd.h                      |    10 +
 src/include/access/orcam.h                     |    67 +-
 src/include/access/orcsegfiles.h               |     3 +-
 src/include/access/plugstorage.h               |    13 +-
 src/include/access/relscan.h                   |    10 +-
 src/include/access/skey.h                      |    20 +-
 src/include/catalog/calico.pl                  |     8 +
 src/include/catalog/dependency.h               |     3 +
 src/include/catalog/indexing.h                 |    21 +
 src/include/catalog/namespace.h                |     3 +-
 src/include/catalog/pg_tidycat.h               |     8 +
 src/include/catalog/pg_type.h                  |    23 +
 src/include/catalog/skylon_elabel.h            |   113 +
 src/include/catalog/skylon_elabel_attribute.h  |   118 +
 src/include/catalog/skylon_graph.h             |   107 +
 src/include/catalog/skylon_graph_elabel.h      |   112 +
 src/include/catalog/skylon_graph_vlabel.h      |   111 +
 src/include/catalog/skylon_index.h             |    58 +
 src/include/catalog/skylon_vlabel.h            |   106 +
 src/include/catalog/skylon_vlabel_attribute.h  |   116 +
 src/include/cdb/cdbexplain.h                   |     5 -
 src/include/cdb/dispatcher_new.h               |     3 +
 src/include/commands/tablecmds.h               |    12 +
 src/include/executor/executor.h                |     7 +-
 src/include/executor/nodeIndexscan.h           |     1 +
 src/include/miscadmin.h                        |     2 -
 src/include/nodes/execnodes.h                  |    10 +-
 src/include/nodes/nodes.h                      |     4 +
 src/include/nodes/parsenodes.h                 |    43 +-
 src/include/nodes/plannodes.h                  |     9 +-
 src/include/optimizer/cost.h                   |     2 +-
 src/include/optimizer/newPlanner.h             |    20 +-
 src/include/parser/gramparse.h                 |     7 +-
 src/include/parser/kwlist.h                    |     5 +
 src/include/parser/parse_node.h                |     1 +
 src/include/utils/acl.h                        |     1 +
 src/include/utils/lsyscache.h                  |     1 +
 src/include/utils/rel.h                        |     5 +-
 tools/bin/gppylib/data/3.1.json                |   620 +-
 tools/bin/gppylib/data/3.2.json                | 10458 -----------------------
 tools/bin/gppylib/data/4.0.json                | 10458 -----------------------
 tools/bin/hawq                                 |     6 +
 tools/bin/hawqbackup                           |   261 +
 tools/bin/hawqpylib/HAWQ_HELP.py               |    22 +
 tools/bin/hawqrestore                          |   105 +
 127 files changed, 7925 insertions(+), 21803 deletions(-)
 create mode 100644 contrib/hornet/newcdbhash.c
 create mode 100644 contrib/hornet/oldcdbhash.c
 create mode 100644 src/backend/catalog/skylon_elabel.c
 create mode 100644 src/backend/catalog/skylon_elabel_attribute.c
 create mode 100644 src/backend/catalog/skylon_graph.c
 create mode 100644 src/backend/catalog/skylon_graph_elabel.c
 create mode 100644 src/backend/catalog/skylon_graph_vlabel.c
 create mode 100644 src/backend/catalog/skylon_index.c
 create mode 100644 src/backend/catalog/skylon_vlabel.c
 create mode 100644 src/backend/catalog/skylon_vlabel_attribute.c
 create mode 100644 src/include/catalog/skylon_elabel.h
 create mode 100644 src/include/catalog/skylon_elabel_attribute.h
 create mode 100644 src/include/catalog/skylon_graph.h
 create mode 100644 src/include/catalog/skylon_graph_elabel.h
 create mode 100644 src/include/catalog/skylon_graph_vlabel.h
 create mode 100644 src/include/catalog/skylon_index.h
 create mode 100644 src/include/catalog/skylon_vlabel.h
 create mode 100644 src/include/catalog/skylon_vlabel_attribute.h
 mode change 100755 => 100644 tools/bin/gppylib/data/3.1.json
 delete mode 100755 tools/bin/gppylib/data/3.2.json
 delete mode 100755 tools/bin/gppylib/data/4.0.json
 create mode 100755 tools/bin/hawqbackup
 create mode 100755 tools/bin/hawqrestore

[hawq] 01/02: HAWQ-1811. Sync with OushuDB - Phase IV

Posted by zt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ztao1987 pushed a commit to branch ztao
in repository https://gitbox.apache.org/repos/asf/hawq.git

commit c3e91931e4340ef33abdf63a957f76788edb79d7
Author: ztao1987 <zh...@gmail.com>
AuthorDate: Wed Mar 16 14:34:58 2022 +0800

    HAWQ-1811. Sync with OushuDB - Phase IV
---
 CMakeLists.txt                                 |     3 +-
 contrib/hornet/hornet.c                        |   131 +
 contrib/hornet/load_hornet_helper_function.sql |    24 +-
 contrib/hornet/newcdbhash.c                    |   177 +
 contrib/hornet/oldcdbhash.c                    |   412 +
 contrib/magma/magma.c                          |   258 +-
 contrib/orc/orc.c                              |     6 +-
 src/all_src_files.txt                          |     8 +
 src/backend/access/appendonly/aomd.c           |    55 +
 src/backend/access/bitmap/bitmapinsert.c       |     3 +-
 src/backend/access/bitmap/bitmappages.c        |     2 +-
 src/backend/access/bitmap/bitmapsearch.c       |     4 +-
 src/backend/access/common/scankey.c            |    15 +-
 src/backend/access/external/plugstorage.c      |    54 +-
 src/backend/access/index/catquery.c            |    12 +-
 src/backend/access/index/gperf.init            |    10 +-
 src/backend/access/nbtree/nbtsearch.c          |     4 +-
 src/backend/access/orc/orcam.c                 |   417 +-
 src/backend/access/orc/orcsegfiles.c           |    34 +-
 src/backend/bootstrap/bootparse.y              |    38 +
 src/backend/catalog/Makefile                   |     2 +
 src/backend/catalog/aclchk.c                   |    83 +-
 src/backend/catalog/catalog.c                  |    24 +-
 src/backend/catalog/core/catcoregen.py         |     8 +
 src/backend/catalog/dependency.c               |    50 +
 src/backend/catalog/index.c                    |     3 +-
 src/backend/catalog/namespace.c                |    71 +
 src/backend/catalog/skylon_elabel.c            |    62 +
 src/backend/catalog/skylon_elabel_attribute.c  |    66 +
 src/backend/catalog/skylon_graph.c             |    60 +
 src/backend/catalog/skylon_graph_elabel.c      |    59 +
 src/backend/catalog/skylon_graph_vlabel.c      |    59 +
 src/backend/catalog/skylon_index.c             |    73 +
 src/backend/catalog/skylon_vlabel.c            |    60 +
 src/backend/catalog/skylon_vlabel_attribute.c  |    64 +
 src/backend/catalog/system_views.sql           |    29 +
 src/backend/cdb/Makefile                       |     1 -
 src/backend/cdb/cdbdatalocality.c              |   187 +-
 src/backend/cdb/cdbexplain.c                   |   165 +-
 src/backend/cdb/cdbplan.c                      |     8 +
 src/backend/cdb/dispatcher.c                   |     1 -
 src/backend/cdb/dispatcher_new.c               |    10 +-
 src/backend/commands/copy.c                    |     4 +
 src/backend/commands/explain.c                 |     9 -
 src/backend/commands/indexcmds.c               |    80 +-
 src/backend/commands/tablecmds.c               |  1428 ++++
 src/backend/commands/vacuum.c                  |    20 +
 src/backend/executor/execIndexscan.c           |    34 +-
 src/backend/executor/execMain.c                |    34 +-
 src/backend/executor/execUtils.c               |    13 +-
 src/backend/executor/functions.c               |     4 +-
 src/backend/executor/newExecutor.c             |    12 +-
 src/backend/executor/nodeExternalscan.c        |   104 +-
 src/backend/executor/nodeIndexscan.c           |    50 +-
 src/backend/executor/nodeNestloop.c            |    38 +-
 src/backend/executor/nodeSubplan.c             |    68 +-
 src/backend/executor/spi.c                     |     5 +-
 src/backend/nodes/copyfuncs.c                  |    35 +-
 src/backend/nodes/equalfuncs.c                 |     9 +
 src/backend/nodes/outfast.c                    |    30 +
 src/backend/nodes/outfuncs.c                   |     9 +
 src/backend/nodes/readfast.c                   |    33 +-
 src/backend/nodes/readfuncs.c                  |    12 +-
 src/backend/optimizer/path/indxpath.c          |   108 +-
 src/backend/optimizer/plan/createplan.c        |    33 +-
 src/backend/optimizer/plan/newPlanner.c        |    70 +-
 src/backend/optimizer/plan/planner.c           |     2 +-
 src/backend/optimizer/plan/setrefs.c           |    26 +
 src/backend/parser/analyze.c                   |   451 +-
 src/backend/parser/gram.y                      |   262 +-
 src/backend/parser/parse_clause.c              |   183 +-
 src/backend/parser/parse_expr.c                |    49 +-
 src/backend/parser/parse_relation.c            |    19 +-
 src/backend/tcop/postgres.c                    |    22 +-
 src/backend/tcop/utility.c                     |    56 +
 src/backend/utils/cache/lsyscache.c            |    52 +-
 src/backend/utils/init/globals.c               |     2 -
 src/backend/utils/misc/guc.c                   |    25 +-
 src/backend/utils/mmgr/Makefile                |     2 +
 src/bin/psql/tab-complete.c                    |    87 +-
 src/include/access/aomd.h                      |    10 +
 src/include/access/orcam.h                     |    60 +-
 src/include/access/orcsegfiles.h               |     3 +-
 src/include/access/plugstorage.h               |    13 +-
 src/include/access/relscan.h                   |    10 +-
 src/include/access/skey.h                      |    20 +-
 src/include/catalog/calico.pl                  |     8 +
 src/include/catalog/dependency.h               |     3 +
 src/include/catalog/indexing.h                 |    21 +
 src/include/catalog/namespace.h                |     3 +-
 src/include/catalog/pg_tidycat.h               |     8 +
 src/include/catalog/pg_type.h                  |    23 +
 src/include/catalog/skylon_elabel.h            |   113 +
 src/include/catalog/skylon_elabel_attribute.h  |   118 +
 src/include/catalog/skylon_graph.h             |   107 +
 src/include/catalog/skylon_graph_elabel.h      |   112 +
 src/include/catalog/skylon_graph_vlabel.h      |   111 +
 src/include/catalog/skylon_index.h             |    58 +
 src/include/catalog/skylon_vlabel.h            |   106 +
 src/include/catalog/skylon_vlabel_attribute.h  |   116 +
 src/include/cdb/cdbexplain.h                   |     5 -
 src/include/cdb/dispatcher_new.h               |     3 +
 src/include/commands/tablecmds.h               |    12 +
 src/include/executor/executor.h                |     7 +-
 src/include/executor/nodeIndexscan.h           |     1 +
 src/include/miscadmin.h                        |     2 -
 src/include/nodes/execnodes.h                  |    10 +-
 src/include/nodes/nodes.h                      |     4 +
 src/include/nodes/parsenodes.h                 |    43 +-
 src/include/nodes/plannodes.h                  |     9 +-
 src/include/optimizer/cost.h                   |     2 +-
 src/include/optimizer/newPlanner.h             |    20 +-
 src/include/parser/gramparse.h                 |     7 +-
 src/include/parser/kwlist.h                    |     5 +
 src/include/parser/parse_node.h                |     1 +
 src/include/utils/acl.h                        |     1 +
 src/include/utils/lsyscache.h                  |     1 +
 tools/bin/gppylib/data/3.1.json                |   620 +-
 tools/bin/gppylib/data/3.2.json                | 10458 -----------------------
 tools/bin/gppylib/data/4.0.json                | 10458 -----------------------
 tools/bin/hawq                                 |     6 +
 tools/bin/hawqbackup                           |   261 +
 tools/bin/hawqpylib/HAWQ_HELP.py               |    22 +
 tools/bin/hawqrestore                          |   105 +
 124 files changed, 7698 insertions(+), 21680 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d137340..5975f15 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -109,7 +109,6 @@ include_directories(${CMAKE_SOURCE_DIR}/hornet/univplan/src)
 include_directories(${CMAKE_SOURCE_DIR}/hornet/magma/src)
 include_directories(${CMAKE_SOURCE_DIR}/hornet/storage/src)
 include_directories(${CMAKE_SOURCE_DIR}/hornet/executor/src)
-include_directories(${CMAKE_SOURCE_DIR}/hornet/scheduler/src)
 include_directories(${CMAKE_BINARY_DIR}/hornet/dbcommon/src)
 include_directories(/opt/dependency/package/include)
 
@@ -124,7 +123,7 @@ add_dependencies(dxltranslators config)
 add_executable(postgres ${cdb_source} ${generate_source})
 target_link_libraries(postgres z bz2 lz4 snappy xml2 curl ldap json-c krb5 yarn thrift) # basic
 target_link_libraries(postgres gpos xerces-c naucrates gpdbcost gpopt dxltranslators) # gp-orca
-target_link_libraries(postgres hdfs3 dbcommon-shared univplan-shared storage-shared magma-client-shared executor-shared scheduler-shared) # hornet
+target_link_libraries(postgres hdfs3 dbcommon-shared univplan-shared storage-shared magma-client-shared storage-magma-format-shared executor-shared) # hornet
 target_link_libraries(postgres dl)
 add_dependencies(postgres config)
 
diff --git a/contrib/hornet/hornet.c b/contrib/hornet/hornet.c
index 3777459..25627b1 100644
--- a/contrib/hornet/hornet.c
+++ b/contrib/hornet/hornet.c
@@ -19,13 +19,22 @@
 
 #include "postgres.h"
 
+#include <inttypes.h>
+
 #include "funcapi.h"
 
+#include "access/fileam.h"
+#include "access/filesplit.h"
+#include "access/orcam.h"
+#include "catalog/pg_exttable.h"
 #include "hdfs/hdfs.h"
+#include "storage/cwrapper/orc-format-c.h"
 #include "storage/fd.h"
 #include "storage/filesystem.h"
 #include "utils/builtins.h"
+#include "utils/datum.h"
 #include "utils/hawq_funcoid_mapping.h"
+#include "utils/lsyscache.h"
 
 Datum ls_hdfs_dir(PG_FUNCTION_ARGS);
 
@@ -133,9 +142,131 @@ Datum ls_hdfs_dir(PG_FUNCTION_ARGS) {
   }
 }
 
+Datum is_supported_proc_in_NewQE(PG_FUNCTION_ARGS);
 PG_FUNCTION_INFO_V1(is_supported_proc_in_NewQE);
 Datum is_supported_proc_in_NewQE(PG_FUNCTION_ARGS) {
   Oid a = PG_GETARG_OID(0);
   int32_t mappingFuncId = HAWQ_FUNCOID_MAPPING(a);
   PG_RETURN_BOOL(!(IS_HAWQ_MAPPING_FUNCID_INVALID(mappingFuncId)));
 }
+
+Datum orc_tid_scan(FunctionCallInfo fcinfo, int segno, const char *url,
+                   uint64_t tid) {
+  Assert(segno == 0 || url == NULL);
+
+  // Argument checking
+  Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+  if (!type_is_rowtype(argtype))
+    ereport(ERROR,
+            (errcode(ERRCODE_DATATYPE_MISMATCH),
+             errmsg("first argument of %s must be a row type", __func__)));
+  Oid relId = get_typ_typrelid(argtype);
+  char relStorage = get_rel_relstorage(relId);
+  if (relstorage_is_external(relStorage)) {
+    ExtTableEntry *extEntry = GetExtTableEntry(relId);
+    const char *fmtName = getExtTblFormatterTypeInFmtOptsStr(extEntry->fmtopts);
+    if (strcasecmp("orc", fmtName) != 0) {
+      ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                      errmsg("Invalid external table type of %s for ORC Table.",
+                             fmtName)));
+    }
+    if (segno > 0) {
+      ereport(ERROR,
+              (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+               errmsg("Expecting URL for external ORC Table.", fmtName)));
+    }
+  } else if (RELSTORAGE_ORC != relStorage) {
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("Invalid table type of '%c' for ORC Table.",
+                           get_rel_relstorage(relId))));
+  }
+
+  // Retrieve output tuple description
+  TupleDesc tupdesc;
+  if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("function returning record called in context "
+                           "that cannot accept type record")));
+  TupleTableSlot *slot = TupleDescGetSlot(tupdesc);
+
+  // Setup projection
+  bool *proj = (bool *)palloc(tupdesc->natts * sizeof(bool));
+  if (fcinfo->nargs == 4) {  // specify the attribute to project
+    ArrayType *arr = PG_GETARG_ARRAYTYPE_P(3);
+    size_t num = (ARR_SIZE(arr) - ARR_DATA_OFFSET(arr)) / sizeof(int32_t);
+    int32_t *attrNums = (int32_t *)ARR_DATA_PTR(arr);
+    memset(proj, 0, tupdesc->natts);
+    for (size_t i = 0; i < num; i++) {
+      if (attrNums[i] <= 0 || attrNums[i] > tupdesc->natts)
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("Invalid attribute number of %" PRId32 ".",
+                               attrNums[i])));
+      proj[attrNums[i] - 1] = 1;
+    }
+  } else {  // scan the whole tuple
+    memset(proj, 1, tupdesc->natts);
+  }
+
+  // Construct file splits
+  FileSplit split = makeNode(FileSplitNode);
+  split->segno = segno;
+  if (segno == 0) split->ext_file_uri_string = (char *)url;
+  split->offsets = 0;
+  split->lengths = INT64_MAX;
+  split->logiceof = INT64_MAX;
+  List *fileSplits = list_make1(split);
+
+  Relation rel = RelationIdGetRelation(relId);
+  OrcScanDescData *scanDesc =
+      orcBeginReadWithOptionsStr(rel, ActiveSnapshot, NULL, fileSplits, proj,
+                                 NULL, "{\"format\": \"APACHE_ORC_FORMAT\"}");
+  RelationClose(rel);
+
+  // XXX(chiyang): hacky way to directly get `ORCFormatC *fmt;`, which is defined
+  // inside orcam.c.
+  bool scanSucceed =
+      ORCFormatTidScanC(*(ORCFormatC **)scanDesc->orcFormatData, tid);
+  checkOrcError(scanDesc->orcFormatData);
+  if (scanSucceed) {
+    orcReadNext(scanDesc, slot);
+
+    // Materialize the tuple
+    Datum *values = slot_get_values(slot);
+    bool *nulls = slot_get_isnull(slot);
+    for (size_t idx = 0; idx < tupdesc->natts; idx++) {
+      if (!nulls[idx]) {
+        values[idx] = datumCopy(values[idx], tupdesc->attrs[idx]->attbyval,
+                                tupdesc->attrs[idx]->attlen);
+      }
+    }
+  } else {
+    orcEndRead(scanDesc);
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("TID %" PRIu64 " exceeds file tuple count.", tid)));
+  }
+  orcEndRead(scanDesc);
+
+  HeapTuple retTuple =
+      heap_form_tuple(tupdesc, slot_get_values(slot), slot_get_isnull(slot));
+
+  PG_RETURN_DATUM(HeapTupleGetDatum(retTuple));
+}
+
+Datum orc_segno_tid_scan(PG_FUNCTION_ARGS);
+PG_FUNCTION_INFO_V1(orc_segno_tid_scan);
+Datum orc_segno_tid_scan(PG_FUNCTION_ARGS) {
+  int segno = PG_GETARG_INT32(1);
+  uint64_t tid = PG_GETARG_INT64(2);
+
+  return orc_tid_scan(fcinfo, segno, NULL, tid);
+}
+
+Datum orc_url_tid_scan(PG_FUNCTION_ARGS);
+PG_FUNCTION_INFO_V1(orc_url_tid_scan);
+Datum orc_url_tid_scan(PG_FUNCTION_ARGS) {
+  const char *url = DatumGetCString(
+      DirectFunctionCall1(textout, PointerGetDatum(PG_GETARG_TEXT_P(1))));
+  uint64_t tid = PG_GETARG_INT64(2);
+
+  return orc_tid_scan(fcinfo, 0, url, tid);
+}
diff --git a/contrib/hornet/load_hornet_helper_function.sql b/contrib/hornet/load_hornet_helper_function.sql
index 06c0dfd..8ba058e 100644
--- a/contrib/hornet/load_hornet_helper_function.sql
+++ b/contrib/hornet/load_hornet_helper_function.sql
@@ -1,3 +1,5 @@
+begin;
+
 DROP SCHEMA IF EXISTS hornet_helper CASCADE;
 CREATE SCHEMA hornet_helper;
 SET SEARCH_PATH = hornet_helper;
@@ -193,4 +195,24 @@ $$ LANGUAGE PLPGSQL;
 
 drop function if exists is_supported_proc_in_NewQE(oid);
 
-create function is_supported_proc_in_NewQE(oid) returns boolean as '$libdir/hornet','is_supported_proc_in_NewQE'language c immutable;
\ No newline at end of file
+create function is_supported_proc_in_NewQE(oid) returns boolean as '$libdir/hornet','is_supported_proc_in_NewQE'language c immutable;
+
+
+
+drop function if exists orc_tid_scan(anyelement, text, bigint, int[]);
+create function orc_tid_scan(anyelement, text, bigint, int[]) returns anyelement
+as '$libdir/hornet','orc_url_tid_scan' language c stable;
+
+drop function if exists orc_tid_scan(anyelement, text, bigint);
+create function orc_tid_scan(anyelement, text, bigint) returns anyelement
+as '$libdir/hornet','orc_url_tid_scan' language c stable;
+
+drop function if exists orc_tid_scan(anyelement, int, bigint, int[]);
+create function orc_tid_scan(anyelement, int, bigint, int[]) returns anyelement
+as '$libdir/hornet','orc_segno_tid_scan' language c stable;
+
+drop function if exists orc_tid_scan(anyelement, int, bigint);
+create function orc_tid_scan(anyelement, int, bigint) returns anyelement
+as '$libdir/hornet','orc_segno_tid_scan' language c stable;
+
+commit;
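
For reference, a minimal usage sketch of the orc_tid_scan helpers defined above
(illustrative only, not part of the patch; the table names t and t_ext and the
HDFS URL are hypothetical):

  -- fetch tuple 42 from segment file 1 of a native ORC table t,
  -- projecting only attributes 1 and 3
  select orc_tid_scan(null::t, 1, 42::bigint, array[1, 3]);

  -- fetch tuple 7 from an external ORC table t_ext, addressing the file by URL
  select orc_tid_scan(null::t_ext, 'hdfs://namenode:8020/path/file.orc', 7::bigint);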
diff --git a/contrib/hornet/newcdbhash.c b/contrib/hornet/newcdbhash.c
new file mode 100644
index 0000000..7a66e23
--- /dev/null
+++ b/contrib/hornet/newcdbhash.c
@@ -0,0 +1,177 @@
+#include "postgres.h"
+
+#include "fmgr.h"
+
+#include "executor/spi.h"
+#include "utils/date.h"
+#include "utils/datetime.h"
+#include "utils/memutils.h"
+#include "utils/numeric.h"
+#include "utils/timestamp.h"
+
+#include "dbcommon/cwrapper/dbcommon-c.h"
+#include "dbcommon/type/type-kind.h"
+PG_FUNCTION_INFO_V1(timestampToout);
+Datum timestampToout(PG_FUNCTION_ARGS) {
+  Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+  char* result;
+  struct pg_tm tt, *tm = &tt;
+  fsec_t fsec;
+  char* tzn = NULL;
+  char buf[MAXDATELEN + 1];
+  if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) == 0)
+    EncodeDateTime(tm, fsec, NULL, &tzn, 1, buf);
+  result = pstrdup(buf);
+  PG_RETURN_CSTRING(result);
+}
+
+PG_FUNCTION_INFO_V1(dateToOut);
+Datum dateToOut(PG_FUNCTION_ARGS) {
+  DateADT date = PG_GETARG_DATEADT(0);
+  char* result;
+  struct pg_tm tt, *tm = &tt;
+  char buf[MAXDATELEN + 1];
+
+  j2date(date + POSTGRES_EPOCH_JDATE, &(tm->tm_year), &(tm->tm_mon),
+         &(tm->tm_mday));
+
+  EncodeDateOnly(tm, 1, buf);
+
+  result = pstrdup(buf);
+  PG_RETURN_CSTRING(result);
+}
+
+PG_FUNCTION_INFO_V1(timestamptzToout);
+Datum timestamptzToout(PG_FUNCTION_ARGS) {
+  TimestampTz dt = PG_GETARG_TIMESTAMPTZ(0);
+  char* result;
+  int tz;
+  struct pg_tm tt, *tm = &tt;
+  fsec_t fsec;
+  char* tzn;
+  char buf[MAXDATELEN + 1];
+  if (timestamp2tm(dt, &tz, tm, &fsec, &tzn, NULL) == 0)
+    EncodeDateTime(tm, fsec, &tz, &tzn, 1, buf);
+  result = pstrdup(buf);
+  PG_RETURN_CSTRING(result);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiBigInt);
+Datum cdbHashApiBigInt(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(int8out, Int64GetDatum(PG_GETARG_INT64(0))));
+  int32_t ret = dbcommonCdbHash(BIGINTID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiBool);
+Datum cdbHashApiBool(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(boolout, BoolGetDatum(PG_GETARG_BOOL(0))));
+  int32_t ret = dbcommonCdbHash(BOOLEANID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiInt);
+Datum cdbHashApiInt(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(int4out, Int32GetDatum(PG_GETARG_INT32(0))));
+  int32_t ret = dbcommonCdbHash(INTID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiSmallInt);
+Datum cdbHashApiSmallInt(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(int2out, Int16GetDatum((int16)PG_GETARG_INT32(0))));
+  int32_t ret = dbcommonCdbHash(SMALLINTID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiTimestamp);
+Datum cdbHashApiTimestamp(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(DirectFunctionCall1(
+      timestampToout, TimestampGetDatum(PG_GETARG_TIMESTAMP(0))));
+  int32_t ret = dbcommonCdbHash(TIMESTAMPID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiTimestampTz);
+Datum cdbHashApiTimestampTz(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(DirectFunctionCall1(
+      timestampToout, TimestampGetDatum(PG_GETARG_TIMESTAMP(0))));
+  int32_t ret = dbcommonCdbHash(TIMESTAMPTZID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiBpchar);
+Datum cdbHashApiBpchar(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(bpcharout, PointerGetDatum(PG_GETARG_BPCHAR_P(0))));
+  int32_t ret = dbcommonCdbHash(CHARID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiText);
+Datum cdbHashApiText(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(textout, PointerGetDatum(PG_GETARG_TEXT_P(0))));
+  int32_t ret = dbcommonCdbHash(STRINGID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiVarchar);
+Datum cdbHashApiVarchar(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(varcharout, PointerGetDatum(PG_GETARG_VARCHAR_P(0))));
+  int32_t ret = dbcommonCdbHash(VARCHARID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiBytea);
+Datum cdbHashApiBytea(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(byteaout, PointerGetDatum(PG_GETARG_BYTEA_P(0))));
+  int32_t ret = dbcommonCdbHash(BINARYID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiFloat8);
+Datum cdbHashApiFloat8(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(float8out, Float8GetDatum(PG_GETARG_FLOAT8(0))));
+  int32_t ret = dbcommonCdbHash(DOUBLEID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiFloat4);
+Datum cdbHashApiFloat4(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(float4out, Float4GetDatum(PG_GETARG_FLOAT4(0))));
+  int32_t ret = dbcommonCdbHash(FLOATID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiDate);
+Datum cdbHashApiDate(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(dateToOut, DateADTGetDatum(PG_GETARG_DATEADT(0))));
+  int32_t ret = dbcommonCdbHash(DATEID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiTime);
+Datum cdbHashApiTime(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(time_out, TimeADTGetDatum(PG_GETARG_TIMEADT(0))));
+  int32_t ret = dbcommonCdbHash(TIMEID, str);
+  PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(cdbHashApiNumeric);
+Datum cdbHashApiNumeric(PG_FUNCTION_ARGS) {
+  char* str = DatumGetCString(
+      DirectFunctionCall1(numeric_out, NumericGetDatum(PG_GETARG_NUMERIC(0))));
+  int32_t ret = dbcommonCdbHash(DECIMALNEWID, str);
+  PG_RETURN_INT32(ret);
+}
diff --git a/contrib/hornet/oldcdbhash.c b/contrib/hornet/oldcdbhash.c
new file mode 100644
index 0000000..ab8bc97
--- /dev/null
+++ b/contrib/hornet/oldcdbhash.c
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*-------------------------------------------------------------------------
+ *
+ * hashapi_access.c
+ *     Test functions for accessing the Cdb Hash API.
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "fmgr.h"
+
+#include "cdb/cdbhash.h"
+#include "executor/spi.h"
+#include "funcapi.h"
+#include "utils/date.h"
+#include "utils/datetime.h"
+#include "utils/memutils.h"
+#include "utils/numeric.h"
+#include "utils/timestamp.h"
+#define GET_STR(textp) \
+  DatumGetCString(DirectFunctionCall1(textout, PointerGetDatum(textp)))
+
+/*
+ * Pointer to the hash session characteristics.
+ */
+static CdbHash *h;
+
+/*==============================================
+ *
+ * SINGLE VALUE HASHING
+ *
+ *==============================================
+ */
+
+/*
+ * HASHAPI_Hash_1_BigInt
+ * Perform a simple 1-value bigint hash
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_BigInt);
+Datum HASHAPI_Hash_1_BigInt(PG_FUNCTION_ARGS) {
+  int32 num_segs;            /* number of segments  */
+  int16 algorithm;           /* hashing algorithm   */
+  int64 val1;                /* big int input value */
+  unsigned int targetbucket; /* 0-based  */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_INT64(0);
+  d1 = Int64GetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = INT8OID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_Bool
+ * Perform a simple 1-value boolean hash
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_Bool);
+Datum HASHAPI_Hash_1_Bool(PG_FUNCTION_ARGS) {
+  Datum d1;
+  Oid oid;
+  bool val1; /* boolean input value */
+  /* Get the value to hash */
+  val1 = PG_GETARG_BOOL(0);
+  d1 = BoolGetDatum(val1);
+  h = makeCdbHash(1, 1);
+  cdbhashinit(h);
+  oid = BOOLOID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_Int
+ * Perform a simple 1-value int4 hash
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_Int);
+Datum HASHAPI_Hash_1_Int(PG_FUNCTION_ARGS) {
+  int32 val1; /* int input value */
+  Datum d1;
+  Oid oid;
+  val1 = PG_GETARG_INT32(0);
+  d1 = Int32GetDatum(val1);
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = INT4OID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_SmallInt
+ * Perform a simple 1-value int2 hash
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_SmallInt);
+Datum HASHAPI_Hash_1_SmallInt(PG_FUNCTION_ARGS) {
+  int32 value; /* int input value
+                                  will be cast to int16 */
+  int16 val1;
+  unsigned int targetbucket; /* 0-based   */
+  Datum d1;
+  Oid oid;
+  value = PG_GETARG_INT32(0);
+  val1 = (int16)value;
+  d1 = Int16GetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = INT2OID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_Char
+ * Perform a simple 1 character hash
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_BpChar);
+Datum HASHAPI_Hash_1_BpChar(PG_FUNCTION_ARGS) {
+  BpChar *val1; /* char value          */
+  Datum d1;
+  Oid oid;
+  val1 = PG_GETARG_BPCHAR_P(0);
+  d1 = PointerGetDatum(val1);
+  h = makeCdbHash(1, 1);
+  cdbhashinit(h);
+  oid = BPCHAROID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_Text
+ * Perform a simple 1 string hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_Text);
+Datum HASHAPI_Hash_1_Text(PG_FUNCTION_ARGS) {
+  text *val1; /* char value          */
+  Datum d1;
+  Oid oid;
+  val1 = PG_GETARG_TEXT_P(0);
+  d1 = PointerGetDatum(val1);
+  h = makeCdbHash(1, 1);
+  cdbhashinit(h);
+  oid = TEXTOID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_Varchar
+ * Perform a simple 1 string (of type VARCHAR) hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_Varchar);
+Datum HASHAPI_Hash_1_Varchar(PG_FUNCTION_ARGS) {
+  VarChar *val1; /* varchar value		  */
+  Datum d1;
+  Oid oid;
+  val1 = PG_GETARG_VARCHAR_P(0);
+  d1 = PointerGetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  cdbhashinit(h);
+  oid = VARCHAROID;
+  cdbhash(h, d1, oid);
+  /* Avoid leaking memory for toasted inputs */
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_Bytea
+ * Perform a simple 1 string (of type BYTEA) hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_Bytea);
+Datum HASHAPI_Hash_1_Bytea(PG_FUNCTION_ARGS) {
+  bytea *val1;     /* bytea value		  */
+  int16 algorithm; /* hashing algorithm   */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_BYTEA_P(0);
+  d1 = PointerGetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  cdbhashinit(h);
+  oid = BYTEAOID;
+  cdbhash(h, d1, oid);
+  /* Avoid leaking memory for toasted inputs */
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_float8
+ * Perform a single float8 value (double) hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_float8);
+Datum HASHAPI_Hash_1_float8(PG_FUNCTION_ARGS) {
+  float8 val1; /* float8 value */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_FLOAT8(0);
+  d1 = Float8GetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = FLOAT8OID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_float4
+ * Perform a single float4 value (double) hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_float4);
+Datum HASHAPI_Hash_1_float4(PG_FUNCTION_ARGS) {
+  float4 val1; /* float4 value */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_FLOAT4(0);
+  d1 = Float4GetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = FLOAT4OID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_timestamp
+ * Perform a single timestamp value hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_timestamp);
+Datum HASHAPI_Hash_1_timestamp(PG_FUNCTION_ARGS) {
+  Timestamp val1; /* timestamp value */
+  Datum d1;
+  Oid oid;
+  val1 = PG_GETARG_TIMESTAMP(0);
+  d1 = TimestampGetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = TIMESTAMPOID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_timestamptz
+ * Perform a single timestamp with time zone value hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_timestamptz);
+Datum HASHAPI_Hash_1_timestamptz(PG_FUNCTION_ARGS) {
+  TimestampTz val1; /* timestamp with time zone value */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_TIMESTAMPTZ(0);
+  d1 = TimestampTzGetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = TIMESTAMPTZOID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_date
+ * Perform a single date value hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_date);
+Datum HASHAPI_Hash_1_date(PG_FUNCTION_ARGS) {
+  DateADT val1; /* date value		  */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_DATEADT(0);
+  d1 = DateADTGetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = DATEOID;
+  cdbhash(h, d1, oid);
+  /* reduce the result hash value */
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_time
+ * Perform a single time value hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_time);
+Datum HASHAPI_Hash_1_time(PG_FUNCTION_ARGS) {
+  TimeADT val1; /* time value		  */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_TIMEADT(0);
+  d1 = TimeADTGetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = TIMEOID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_timetz
+ * Perform a single time with time zone value hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_timetz);
+Datum HASHAPI_Hash_1_timetz(PG_FUNCTION_ARGS) {
+  TimeTzADT *val1; /* time w/timezone value */
+  int16 algorithm; /* hashing algorithm   */
+  Datum d1;
+  Oid oid;
+  /* Get the value to hash */
+  val1 = PG_GETARG_TIMETZADT_P(0);
+  d1 = TimeTzADTPGetDatum(val1);
+  /* create a CdbHash for this hash test. */
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  oid = TIMETZOID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_numeric
+ * Perform a single NUMERIC value hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_numeric);
+Datum HASHAPI_Hash_1_numeric(PG_FUNCTION_ARGS) {
+  Numeric val1;    /* NUMERIC value */
+  int16 algorithm; /* hashing algorithm   */
+  Datum d1;
+  Oid oid;
+  val1 = PG_GETARG_NUMERIC(0);
+  d1 = NumericGetDatum(val1);
+  h = makeCdbHash(1, 1);
+  cdbhashinit(h);
+  oid = NUMERICOID;
+  cdbhash(h, d1, oid);
+  PG_RETURN_INT32(h->hash);
+}
+
+/*
+ * HASHAPI_Hash_1_null
+ * Perform a single null value hash.
+ */
+PG_FUNCTION_INFO_V1(HASHAPI_Hash_1_null);
+Datum HASHAPI_Hash_1_null(PG_FUNCTION_ARGS) {
+  h = makeCdbHash(1, 1);
+  /* init cdb hash */
+  cdbhashinit(h);
+  cdbhashnull(h);
+  PG_RETURN_INT32(h->hash);
+}
diff --git a/contrib/magma/magma.c b/contrib/magma/magma.c
index c732b85..b91ec80 100644
--- a/contrib/magma/magma.c
+++ b/contrib/magma/magma.c
@@ -174,7 +174,7 @@ typedef struct MagmaTidC {
   uint16_t rangeid;
 } MagmaTidC;
 
-typedef struct GlobalFormatUserData {
+typedef struct MagmaFormatUserData {
   MagmaFormatC *fmt;
   char *dbname;
   char *schemaname;
@@ -196,9 +196,11 @@ typedef struct GlobalFormatUserData {
 
   // for insert/update/delete
   TimestampType *colTimestamp;
-} GlobalFormatUserData;
 
-static MagmaClientC* global_magma_client;
+  bool isFirstRescan;
+} MagmaFormatUserData;
+
+static MagmaClientC *magma_client_instance;
 
 /*
  * Utility functions for magma in pluggable storage
@@ -225,15 +227,15 @@ static MagmaFormatC *create_magma_formatter_instance(List *fmt_opts_defelem,
 
 static MagmaClientC *create_magma_client_instance();
 static void init_magma_format_user_data_for_read(
-    TupleDesc tup_desc, GlobalFormatUserData *user_data);
+    TupleDesc tup_desc, MagmaFormatUserData *user_data);
 static void init_magma_format_user_data_for_write(
-    TupleDesc tup_desc, GlobalFormatUserData *user_data, Relation relation);
+    TupleDesc tup_desc, MagmaFormatUserData *user_data, Relation relation);
 
 static void build_options_in_json(char *serializeSchema, int serializeSchemaLen,
                                   List *fmt_opts_defelem, int encoding, int rangeNum,
                                   char *formatterName, char **json_str);
 static void build_magma_tuple_descrition_for_read(
-    Plan *plan, Relation relation, GlobalFormatUserData *user_data, bool skipTid);
+    Plan *plan, Relation relation, MagmaFormatUserData *user_data, bool skipTid);
 
 static void magma_scan_error_callback(void *arg);
 
@@ -256,7 +258,7 @@ static void getHostNameByIp(const char *ipaddr, char *hostname);
 
 static void magma_clear(PlugStorage ps, bool clearSlot) {
   FileScanDesc fsd = ps->ps_file_scan_desc;
-  GlobalFormatUserData *user_data = (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+  MagmaFormatUserData *user_data = (MagmaFormatUserData *)(fsd->fs_ps_user_data);
   TupleTableSlot *slot = ps->ps_tuple_table_slot;
 
   if (user_data->fmt) {
@@ -1120,6 +1122,8 @@ Datum magma_beginscan(PG_FUNCTION_ARGS) {
   file_scan_desc->fs_serializeSchema =
       pnstrdup(serializeSchema, serializeSchemaLen);
   file_scan_desc->fs_serializeSchemaLen = serializeSchemaLen;
+  file_scan_desc->fs_ps_magma_splits = ps->ps_magma_splits;
+  file_scan_desc->fs_ps_magma_skip_tid = ps->ps_magma_skip_tid;
 
   /* Setup scan functions */
   get_magma_scan_functions(formatterName, file_scan_desc);
@@ -1179,7 +1183,8 @@ Datum magma_beginscan(PG_FUNCTION_ARGS) {
   /* currentSliceId == ps->ps_scan_state->ps.state->currentSliceIdInPlan */
   if (AmISegment()) {
     /* Initialize user data */
-    GlobalFormatUserData *user_data = palloc0(sizeof(GlobalFormatUserData));
+    MagmaFormatUserData *user_data = palloc0(sizeof(MagmaFormatUserData));
+    user_data->isFirstRescan = true;
     if (formatterName != NULL &&
         (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
       user_data->isMagmatp = true;
@@ -1347,6 +1352,8 @@ void init_common_plan_context(CommonPlanContext *ctx) {
   ctx->scanReadStatsOnly = false;
   ctx->parent = NULL;
   ctx->exprBufStack = NIL;
+  ctx->isConvertingIndexQual = false;
+  ctx->idxColumns = NIL;
 }
 
 void free_common_plan_context(CommonPlanContext *ctx) {
@@ -1360,8 +1367,6 @@ void free_common_plan_context(CommonPlanContext *ctx) {
 Datum magma_getnext_init(PG_FUNCTION_ARGS) {
   checkOushuDbExtensiveFunctionSupport(__func__);
   PlugStorage ps = (PlugStorage)(fcinfo->context);
-  // PlanState *plan_state = ps->ps_plan_state;
-  // ExternalScanState *ext_scan_state = ps->ps_ext_scan_state;
 
   ExternalSelectDesc ext_select_desc = NULL;
   /*
@@ -1392,8 +1397,8 @@ Datum magma_getnext(PG_FUNCTION_ARGS) {
   checkOushuDbExtensiveFunctionSupport(__func__);
   PlugStorage ps = (PlugStorage)(fcinfo->context);
   FileScanDesc fsd = ps->ps_file_scan_desc;
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(fsd->fs_ps_user_data);
   TupleTableSlot *slot = ps->ps_tuple_table_slot;
   bool *nulls = slot_get_isnull(slot);
   memset(nulls, true, user_data->numberOfColumns);
@@ -1508,81 +1513,124 @@ Datum magma_getnext(PG_FUNCTION_ARGS) {
 Datum magma_rescan(PG_FUNCTION_ARGS) {
   checkOushuDbExtensiveFunctionSupport(__func__);
   PlugStorage ps = (PlugStorage)(fcinfo->context);
+  ScanState *scan_state = ps->ps_scan_state;
   FileScanDesc fsd = ps->ps_file_scan_desc;
   MagmaSnapshot *snapshot = &(ps->ps_snapshot);
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(fsd->fs_ps_user_data);
-
-  if (user_data == NULL) {
-    /* 1 Initialize user data */
-    user_data = palloc0(sizeof(GlobalFormatUserData));
+  MagmaRuntimeKeys runtimeKeys;
+  Assert(ps->num_run_time_keys >= 0);
+  if (ps->num_run_time_keys == 0) {
+    runtimeKeys.num = 0;
+    runtimeKeys.keys = NULL;
+  } else {
+    Assert(ps->runtime_key_info != NULL);
+    runtimeKeys.num = ps->num_run_time_keys;
+    runtimeKeys.keys = palloc0(ps->num_run_time_keys * sizeof(MagmaRuntimeKey));
+    for (int i = 0; i < ps->num_run_time_keys; ++i) {
+      ScanKey scan_key = ps->runtime_key_info[i].scan_key;
+      runtimeKeys.keys[i].flag = scan_key->sk_flags;
+      runtimeKeys.keys[i].attnoold = scan_key->sk_attnoold;
+      runtimeKeys.keys[i].value =
+          OutputFunctionCall(&scan_key->sk_out_func, scan_key->sk_argument);
+    }
+  }
 
-    if (fsd->fs_formatter_name != NULL &&
-        (strncasecmp(fsd->fs_formatter_name, "magmatp",
-                     sizeof("magmatp") - 1) == 0)) {
-      user_data->isMagmatp = true;
-    } else {
-      user_data->isMagmatp = false;
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(fsd->fs_ps_user_data);
+  if (user_data != NULL) {
+    // There are 2 cases in which user_data is not null:
+    // 1. If this is the first rescan, at this point, we have done
+    // magma_beginscan() and haven't done magma_getnext() yet.
+    // We don't need to create user_data from scratch, just use it.
+    if (user_data->isFirstRescan) {
+      user_data->isFirstRescan = false;
+      MagmaFormatReScanMagmaFormatC(user_data->fmt, &runtimeKeys);
+      if (runtimeKeys.keys) {
+        pfree(runtimeKeys.keys);
+      }
+      PG_RETURN_VOID();
     }
 
-    init_magma_format_user_data_for_read(fsd->fs_tupDesc, user_data);
+    // 2. Otherwise, this is not the first rescan, so we should do magma_clear() here.
+    // This case happens with the Nested Loop Exists Join. In that case, as long
+    // as we can get a piece of data in magma_getnext(), we will start a new
+    // rescan. Therefore, we didn't do magma_clear() in magma_getnext(), which
+    // resulted in the dirty user_data not being cleared.
+    // We don't reuse the user_data since that would make the code complex, just
+    // clear it and create a new one below.
+    magma_clear(ps, true);
+  }
 
-    Relation rel = fsd->fs_rd;
-    ExtTableEntry *ete = GetExtTableEntry(RelationGetRelid(rel));
+  /* Initialize user data */
+  user_data = palloc0(sizeof(MagmaFormatUserData));
+  if (fsd->fs_formatter_name != NULL &&
+      (strncasecmp(fsd->fs_formatter_name, "magmatp", sizeof("magmatp") - 1) == 0)) {
+    user_data->isMagmatp = true;
+  } else {
+    user_data->isMagmatp = false;
+  }
 
-    int formatterType = ExternalTableType_Invalid;
+  /* the number of ranges is dynamic for magma table */
+  int32_t nRanges = 0;
+  ListCell *lc_split = NULL;
+  foreach (lc_split, fsd->fs_ps_magma_splits) {
+    List *split = (List *)lfirst(lc_split);
+    nRanges += list_length(split);
+  }
 
-    char *formatterName = NULL;
-    getExternalTableTypeStr(ete->fmtcode, ete->fmtopts, &formatterType,
-                            &formatterName);
+  init_magma_format_user_data_for_read(fsd->fs_tupDesc, user_data);
 
-    bool isexternal = false;
-    char *serializeSchema = fsd->fs_serializeSchema;
-    int serializeSchemaLen = fsd->fs_serializeSchemaLen;
-    get_magma_category_info(ete->fmtopts, &isexternal);
+  /* Create formatter instance */
+  user_data->fmt = create_magma_formatter_instance(
+      NIL, fsd->fs_serializeSchema, fsd->fs_serializeSchemaLen, PG_UTF8, fsd->fs_formatter_name, nRanges);
 
-    user_data->fmt = create_magma_formatter_instance(
-        NIL, serializeSchema, serializeSchemaLen, PG_UTF8, formatterName, 0);
-
-    /* 4 Build tuple description */
-    Plan *plan = fsd->fs_ps_plan;
-    build_magma_tuple_descrition_for_read(plan, fsd->fs_rd, user_data, ps->ps_magma_skip_tid);
-
-    /* 4.1 Build plan */
-    if (AmISegment() &&
-        currentSliceId == ps->ps_scan_state->ps.state->currentSliceIdInPlan) {
-      CommonPlanContext ctx;
-      init_common_plan_context(&ctx);
-      plan->plan_parent_node_id = -1;
-      convert_extscan_to_common_plan(plan, fsd->fs_ps_scan_state->splits,
-                                     fsd->fs_rd, &ctx);
-      int32_t size = 0;
-      char *planstr = univPlanSerialize(ctx.univplan, &size, false);
-
-      /* 5 Save user data */
-      fsd->fs_ps_user_data = (void *)user_data;
-
-      /* 6 Begin scan with the formatter */
-      if (currentSliceId == ps->ps_scan_state->ps.state->currentSliceIdInPlan) {
-        bool enableShm = (strcasecmp(magma_enable_shm, "ON") == 0);
-        MagmaFormatBeginScanMagmaFormatC(user_data->fmt, user_data->colToReads,
-                                         snapshot, planstr, size,
-                                         enableShm, ps->ps_magma_skip_tid,
-                                         magma_shm_limit_per_block * 1024);
-        MagmaFormatCatchedError *e =
-            MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
-
-        if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
-          elog(ERROR, "magma_scan: failed to beginscan: %s(%d)", e->errMessage,
-               e->errCode);
-        }
-      }
+  /* Prepare database, schema, and table information */
+  char *dbname = database;
+  char *schemaname = getNamespaceNameByOid(RelationGetNamespace(fsd->fs_rd));
+  Assert(schemaname != NULL);
+  char *tablename = RelationGetRelationName(fsd->fs_rd);
 
-      free_common_plan_context(&ctx);
-    }
+  MagmaFormatC_SetupTarget(user_data->fmt, dbname, schemaname, tablename);
+  MagmaFormatC_SetupTupDesc(user_data->fmt, user_data->numberOfColumns,
+                            user_data->colNames, user_data->colDatatypes,
+                            user_data->colDatatypeMods,
+                            user_data->colIsNulls);
+
+  /* Build tuple description */
+  Plan *plan = fsd->fs_ps_plan;
+  build_magma_tuple_descrition_for_read(plan, fsd->fs_rd, user_data, fsd->fs_ps_magma_skip_tid);
+
+  /* Build plan */
+  CommonPlanContext ctx;
+  init_common_plan_context(&ctx);
+  plan->plan_parent_node_id = -1;
+  convert_extscan_to_common_plan(plan, scan_state->splits,
+                                 fsd->fs_rd, &ctx);
+  int32_t size = 0;
+  char *planstr = univPlanSerialize(ctx.univplan, &size, false);
+
+  /* Save user data */
+  fsd->fs_ps_user_data = (void *)user_data;
+
+  /* Begin scan with the formatter */
+  bool enableShm = (strcasecmp(magma_enable_shm, "ON") == 0);
+  MagmaFormatBeginScanMagmaFormatC(user_data->fmt, user_data->colToReads,
+                                   snapshot, planstr, size,
+                                   enableShm, fsd->fs_ps_magma_skip_tid,
+                                   magma_shm_limit_per_block * 1024);
+  MagmaFormatCatchedError *e = MagmaFormatGetErrorMagmaFormatC(user_data->fmt);
+  if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
+    elog(ERROR, "magma_scan: failed to beginscan: %s(%d)", e->errMessage,
+         e->errCode);
+  }
+
+  MagmaFormatReScanMagmaFormatC(user_data->fmt, &runtimeKeys);
+  if (runtimeKeys.keys) {
+    pfree(runtimeKeys.keys);
   }
 
+  free_common_plan_context(&ctx);
+
   PG_RETURN_VOID();
 }
 
@@ -1595,7 +1643,7 @@ Datum magma_endscan(PG_FUNCTION_ARGS) {
   PlugStorage ps = (PlugStorage)(fcinfo->context);
   FileScanDesc fsd = ps->ps_file_scan_desc;
 
-  GlobalFormatUserData *user_data = (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+  MagmaFormatUserData *user_data = (MagmaFormatUserData *)(fsd->fs_ps_user_data);
 
   // free memory in endscan, for some subquery scenarios "getnext" might not be called
   if (user_data != NULL) {
@@ -1662,8 +1710,8 @@ Datum magma_stopscan(PG_FUNCTION_ARGS) {
   checkOushuDbExtensiveFunctionSupport(__func__);
   PlugStorage ps = (PlugStorage)(fcinfo->context);
   FileScanDesc fsd = ps->ps_file_scan_desc;
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(fsd->fs_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(fsd->fs_ps_user_data);
   TupleTableSlot *tts = ps->ps_tuple_table_slot;
 
   if (!user_data) PG_RETURN_VOID();
@@ -1855,8 +1903,8 @@ Datum magma_begindelete(PG_FUNCTION_ARGS) {
   char *schema = getNamespaceNameByOid(namespaceOid);
   char *table = RelationGetRelationName(relation);
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)palloc0(sizeof(GlobalFormatUserData));
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)palloc0(sizeof(MagmaFormatUserData));
 
   if (formatterName != NULL &&
       (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
@@ -1938,8 +1986,8 @@ Datum magma_delete(PG_FUNCTION_ARGS) {
   /* It may be memtuple, we need to transfer it to virtual tuple */
   slot_getallattrs(tts);
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(edd->ext_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(edd->ext_ps_user_data);
 
   user_data->colTid.rangeid = DatumGetUInt16(edd->ext_rangeId);
   user_data->colTid.rowid = DatumGetUInt64(edd->ext_rowId);
@@ -2105,8 +2153,8 @@ Datum magma_enddelete(PG_FUNCTION_ARGS) {
   PlugStorage ps = (PlugStorage)(fcinfo->context);
   ExternalInsertDesc edd = ps->ps_ext_delete_desc;
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(edd->ext_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(edd->ext_ps_user_data);
 
   MagmaFormatEndDeleteMagmaFormatC(user_data->fmt);
 
@@ -2297,8 +2345,8 @@ Datum magma_beginupdate(PG_FUNCTION_ARGS) {
   char *schema = getNamespaceNameByOid(namespaceOid);
   char *table = RelationGetRelationName(relation);
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)palloc0(sizeof(GlobalFormatUserData));
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)palloc0(sizeof(MagmaFormatUserData));
 
   if (formatterName != NULL &&
       (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
@@ -2383,8 +2431,8 @@ Datum magma_update(PG_FUNCTION_ARGS) {
   /* It may be memtuple, we need to transfer it to virtual tuple */
   slot_getallattrs(tts);
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(eud->ext_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(eud->ext_ps_user_data);
 
   user_data->colTid.rangeid = DatumGetUInt16(eud->ext_rangeId);
   user_data->colTid.rowid = DatumGetUInt64(eud->ext_rowId);
@@ -2559,8 +2607,8 @@ Datum magma_endupdate(PG_FUNCTION_ARGS) {
   PlugStorage ps = (PlugStorage)(fcinfo->context);
   ExternalInsertDesc eud = ps->ps_ext_update_desc;
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(eud->ext_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(eud->ext_ps_user_data);
 
   int updateCount = MagmaFormatEndUpdateMagmaFormatC(user_data->fmt);
   ps->ps_update_count = updateCount;
@@ -2780,8 +2828,8 @@ Datum magma_insert_init(PG_FUNCTION_ARGS) {
   char *schema = getNamespaceNameByOid(namespaceOid);
   char *table = RelationGetRelationName(relation);
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)palloc0(sizeof(GlobalFormatUserData));
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)palloc0(sizeof(MagmaFormatUserData));
 
   if (formatterName != NULL &&
       (strncasecmp(formatterName, "magmatp", sizeof("magmatp") - 1) == 0)) {
@@ -2865,8 +2913,8 @@ Datum magma_insert(PG_FUNCTION_ARGS) {
   ExternalInsertDesc eid = ps->ps_ext_insert_desc;
   TupleTableSlot *tts = ps->ps_tuple_table_slot;
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(eid->ext_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(eid->ext_ps_user_data);
 
   user_data->colValues = slot_get_values(tts);
   user_data->colIsNulls = slot_get_isnull(tts);
@@ -3050,8 +3098,8 @@ Datum magma_insert_finish(PG_FUNCTION_ARGS) {
   PlugStorage ps = (PlugStorage)(fcinfo->context);
   ExternalInsertDesc eid = ps->ps_ext_insert_desc;
 
-  GlobalFormatUserData *user_data =
-      (GlobalFormatUserData *)(eid->ext_ps_user_data);
+  MagmaFormatUserData *user_data =
+      (MagmaFormatUserData *)(eid->ext_ps_user_data);
 
   MagmaFormatEndInsertMagmaFormatC(user_data->fmt);
 
@@ -3159,7 +3207,6 @@ Datum magma_transaction(PG_FUNCTION_ARGS) {
       break;
     case PS_TXN_CMD_GET_SNAPSHOT: {
       MagmaClientC_CleanupTableInfo(client);
-      int magmaTableFullNamesSize = list_length(ps->magma_talbe_full_names);
       int i = 0;
       ListCell *lc;
       foreach (lc, ps->magma_talbe_full_names) {
@@ -3186,7 +3233,6 @@ Datum magma_transaction(PG_FUNCTION_ARGS) {
     }
     case PS_TXN_CMD_GET_TRANSACTIONID: {
       MagmaClientC_CleanupTableInfo(client);
-      int magmaTableFullNamesSize = list_length(ps->magma_talbe_full_names);
       int i = 0;
       ListCell *lc;
       foreach (lc, ps->magma_talbe_full_names) {
@@ -3410,22 +3456,22 @@ static MagmaFormatC *create_magma_formatter_instance(List *fmt_opts_defelem,
 }
 
 static MagmaClientC *create_magma_client_instance() {
-  if (global_magma_client != NULL) {
-    MagmaClientC_ResetMagmaClient4Reuse(&global_magma_client);
-    return global_magma_client;
+  if (magma_client_instance != NULL) {
+    MagmaClientC_ResetMagmaClient4Reuse(&magma_client_instance);
+    return magma_client_instance;
   }
 
-  global_magma_client = MagmaClientC_NewMagmaClient(magma_nodes_url);
-  MagmaResult *result = MagmaClientC_GetResult(global_magma_client);
+  magma_client_instance = MagmaClientC_NewMagmaClient(magma_nodes_url);
+  MagmaResult *result = MagmaClientC_GetResult(magma_client_instance);
   if (result->level == MAGMA_ERROR) {
-    MagmaClientC_FreeMagmaClient(&global_magma_client);
+    MagmaClientC_FreeMagmaClient(&magma_client_instance);
     elog(ERROR, "%s", result->message);
   }
-  return global_magma_client;
+  return magma_client_instance;
 }
 
 static void init_magma_format_user_data_for_read(
-    TupleDesc tup_desc, GlobalFormatUserData *user_data) {
+    TupleDesc tup_desc, MagmaFormatUserData *user_data) {
   user_data->numberOfColumns = tup_desc->natts;
   user_data->colNames = palloc0(sizeof(char *) * user_data->numberOfColumns);
   user_data->colDatatypes = palloc0(sizeof(int) * user_data->numberOfColumns);
@@ -3452,7 +3498,7 @@ static void init_magma_format_user_data_for_read(
 }
 
 static void init_magma_format_user_data_for_write(
-    TupleDesc tup_desc, GlobalFormatUserData *user_data, Relation relation) {
+    TupleDesc tup_desc, MagmaFormatUserData *user_data, Relation relation) {
   user_data->numberOfColumns = tup_desc->natts;
   user_data->colNames = palloc0(sizeof(char *) * user_data->numberOfColumns);
   user_data->colDatatypes = palloc0(sizeof(int) * user_data->numberOfColumns);
@@ -3475,7 +3521,7 @@ static void init_magma_format_user_data_for_write(
 }
 
 static void build_magma_tuple_descrition_for_read(
-    Plan *plan, Relation relation, GlobalFormatUserData *user_data, bool skipTid) {
+    Plan *plan, Relation relation, MagmaFormatUserData *user_data, bool skipTid) {
   user_data->colToReads = palloc0(sizeof(bool) * user_data->numberOfColumns);
 
   for (int i = 0; i < user_data->numberOfColumns; ++i)
diff --git a/contrib/orc/orc.c b/contrib/orc/orc.c
index 8e45c1c..8c2baf8 100644
--- a/contrib/orc/orc.c
+++ b/contrib/orc/orc.c
@@ -626,7 +626,6 @@ Datum orc_beginscan(PG_FUNCTION_ARGS)
 		char *token = find_filesystem_credential_with_uri(uri_str);
 		SetToken(uri_str, token);
 	}
-	file_scan_desc->fs_ps_scan_state = scan_state; /* for orc rescan */
 	build_file_splits(uri, scan_state, user_data);
 
 	FreeExternalTableUri(uri);
@@ -671,8 +670,6 @@ Datum orc_beginscan(PG_FUNCTION_ARGS)
 Datum orc_getnext_init(PG_FUNCTION_ARGS)
 {
 	PlugStorage ps = (PlugStorage) (fcinfo->context);
-	PlanState *plan_state = ps->ps_plan_state;
-	ExternalScanState *ext_scan_state = ps->ps_ext_scan_state;
 
 	ExternalSelectDesc ext_select_desc = NULL;
 	/*
@@ -826,6 +823,7 @@ Datum orc_rescan(PG_FUNCTION_ARGS)
 {
 	PlugStorage ps = (PlugStorage) (fcinfo->context);
 	FileScanDesc fsd = ps->ps_file_scan_desc;
+	ScanState *scan_state = ps->ps_scan_state;
 	Relation relation = fsd->fs_rd;
 	TupleDesc tup_desc = RelationGetDescr(relation);
 
@@ -845,7 +843,7 @@ Datum orc_rescan(PG_FUNCTION_ARGS)
 
 		/* 3 Build file splits */
 		Uri *uri = ParseExternalTableUri(fsd->fs_uri);
-		build_file_splits(uri, fsd->fs_ps_scan_state, user_data);
+		build_file_splits(uri, scan_state, user_data);
 
 		/* 4 Build tuple description */
 		Plan *plan = fsd->fs_ps_plan;
diff --git a/src/all_src_files.txt b/src/all_src_files.txt
index f9899dd..4d85a15 100644
--- a/src/all_src_files.txt
+++ b/src/all_src_files.txt
@@ -933,6 +933,14 @@ include/catalog/pg_type.h
 include/catalog/pg_type_encoding.h
 include/catalog/pg_user_mapping.h
 include/catalog/pg_window.h
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
 include/catalog/toasting.h
 include/catalog/external/externalmd.h
 include/catalog/external/itemmd.h
diff --git a/src/backend/access/appendonly/aomd.c b/src/backend/access/appendonly/aomd.c
index b640019..77805eb 100644
--- a/src/backend/access/appendonly/aomd.c
+++ b/src/backend/access/appendonly/aomd.c
@@ -104,6 +104,38 @@ FormatAOSegmentFileName(
 	sprintf(filepathname, "%s/%u", basepath, pseudoSegNo);
 }
 
+void
+FormatAOSegmentIndexFileName(
+              char *basepath,
+              int segno,
+              int idxId,
+              int col,
+              int numCols,
+              int32 *fileSegNo,
+              char *filepathname)
+{
+  int pseudoSegNo;
+
+  if (col < 0)
+  {
+    /*
+     * Row oriented Append-Only.
+     */
+    pseudoSegNo = segno;
+  }
+  else
+  {
+    /*
+     * Column oriented Append-only.
+     */
+    pseudoSegNo = ((segno - 1) * numCols) + (col + 1);
+  }
+
+  *fileSegNo = pseudoSegNo;
+
+  sprintf(filepathname, "%s/%u_%u", basepath, pseudoSegNo, idxId);
+}
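+
+/*
+ * Worked example (for illustration only; the values are hypothetical): with
+ * a column-oriented append-only relation where numCols = 3, segno = 2,
+ * col = 1 and idxId = 17027, pseudoSegNo = (2 - 1) * 3 + (1 + 1) = 5 and the
+ * index file path becomes "<basepath>/5_17027". For row-oriented storage
+ * (col < 0) segno is used directly, e.g. "<basepath>/2_17027".
+ */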
+
 /*
  * Make an Append Only relation file segment file name.
  *
@@ -132,6 +164,29 @@ MakeAOSegmentFileName(
 	pfree(basepath);
 }
 
+void
+MakeAOSegmentIndexFileName(
+              Relation rel,
+              int idxId,
+              int segno,
+              int col,
+              int32 *fileSegNo,
+              char *filepathname)
+{
+  char  *basepath;
+  int32   fileSegNoLocal;
+  int numCols;
+
+  /* Get base path for this relation file */
+  basepath = relpath(rel->rd_node);
+  numCols = rel->rd_att->natts;
+
+  FormatAOSegmentIndexFileName(basepath, segno, idxId, col, numCols, &fileSegNoLocal, filepathname);
+
+  *fileSegNo = fileSegNoLocal;
+
+  pfree(basepath);
+}
 /*
  * Open an Append Only relation file segment
  *
diff --git a/src/backend/access/bitmap/bitmapinsert.c b/src/backend/access/bitmap/bitmapinsert.c
index dcdf09e..7c7f0ce 100644
--- a/src/backend/access/bitmap/bitmapinsert.c
+++ b/src/backend/access/bitmap/bitmapinsert.c
@@ -2563,7 +2563,8 @@ _bitmap_doinsert(Relation rel, ItemPointerData ht_ctid, Datum *attdata,
 		scanKey = (ScanKey) (((char *)scanKeys) + attno * sizeof(ScanKeyData));
 
 		ScanKeyEntryInitialize(scanKey, SK_ISNULL, attno + 1, 
-							   BTEqualStrategyNumber, InvalidOid, opfuncid, 0);
+							   BTEqualStrategyNumber, InvalidOid, opfuncid, 0,
+							   InvalidAttrNumber, InvalidOid);
 
 		if (nulls[attno])
 		{
diff --git a/src/backend/access/bitmap/bitmappages.c b/src/backend/access/bitmap/bitmappages.c
index 2aa7fec..f9c4c0e 100644
--- a/src/backend/access/bitmap/bitmappages.c
+++ b/src/backend/access/bitmap/bitmappages.c
@@ -293,7 +293,7 @@ _bitmap_init_buildstate(Relation index, BMBuildState *bmstate)
 
 			ScanKeyEntryInitialize(&(bmstate->bm_lov_scanKeys[attno]), SK_ISNULL, 
 							   attno + 1, BTEqualStrategyNumber, InvalidOid, 
-							   opfuncid, 0);
+							   opfuncid, 0,  InvalidAttrNumber, InvalidOid);
 		}
 
 		bmstate->bm_lov_scanDesc = index_beginscan(bmstate->bm_lov_heap,
diff --git a/src/backend/access/bitmap/bitmapsearch.c b/src/backend/access/bitmap/bitmapsearch.c
index 7f6a3bb..1f14ab3 100644
--- a/src/backend/access/bitmap/bitmapsearch.c
+++ b/src/backend/access/bitmap/bitmapsearch.c
@@ -452,7 +452,9 @@ _bitmap_findbitmaps(IndexScanDesc scan, ScanDirection dir  __attribute__((unused
 								   scan->keyData[keyNo].sk_strategy,
 								   scan->keyData[keyNo].sk_subtype, 
 								   scan->keyData[keyNo].sk_func.fn_oid,
-								   scan->keyData[keyNo].sk_argument);
+								   scan->keyData[keyNo].sk_argument,
+								   InvalidAttrNumber,
+								   InvalidOid);
 		}
 
 		/* When there are no scan keys, all bitmap vectors are included,
diff --git a/src/backend/access/common/scankey.c b/src/backend/access/common/scankey.c
index 2524369..7a09504 100644
--- a/src/backend/access/common/scankey.c
+++ b/src/backend/access/common/scankey.c
@@ -33,21 +33,28 @@ ScanKeyEntryInitialize(ScanKey entry,
 					   AttrNumber attributeNumber,
 					   StrategyNumber strategy,
 					   Oid subtype,
-					   RegProcedure procedure,
-					   Datum argument)
+					   RegProcedure opProcedure,
+					   Datum argument,
+					   AttrNumber attributeNumberOld,
+					   RegProcedure outputProcedure)
 {
 	entry->sk_flags = flags;
 	entry->sk_attno = attributeNumber;
 	entry->sk_strategy = strategy;
 	entry->sk_subtype = subtype;
 	entry->sk_argument = argument;
-	if (RegProcedureIsValid(procedure))
-		fmgr_info(procedure, &entry->sk_func);
+	if (RegProcedureIsValid(opProcedure))
+		fmgr_info(opProcedure, &entry->sk_func);
 	else
 	{
 		Assert(flags & SK_SEARCHNULL);
 		MemSet(&entry->sk_func, 0, sizeof(entry->sk_func));
 	}
+	entry->sk_attnoold = attributeNumberOld;
+	if (RegProcedureIsValid(outputProcedure))
+		fmgr_info(outputProcedure, &entry->sk_out_func);
+	else
+	        MemSet(&entry->sk_out_func, 0, sizeof(entry->sk_out_func));
 }
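+
+/*
+ * Illustrative example: callers that do not need the two new fields pass
+ * InvalidAttrNumber and InvalidOid for the extra arguments, e.g.
+ *
+ *   ScanKeyEntryInitialize(&key, 0, attno, BTEqualStrategyNumber,
+ *                          InvalidOid, opfuncid, arg,
+ *                          InvalidAttrNumber, InvalidOid);
+ *
+ * which matches the call sites updated in the bitmap and nbtree code in
+ * this patch.
+ */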
 
 /*
diff --git a/src/backend/access/external/plugstorage.c b/src/backend/access/external/plugstorage.c
index f34f77c..6afc47d 100644
--- a/src/backend/access/external/plugstorage.c
+++ b/src/backend/access/external/plugstorage.c
@@ -432,8 +432,6 @@ ExternalSelectDesc InvokePlugStorageFormatGetNextInit(FmgrInfo *func,
 	FunctionCallInfoData fcinfo;
 
 	psdata.type              = T_PlugStorageData;
-	psdata.ps_plan_state     = planState;
-	psdata.ps_ext_scan_state = extScanState;
 
 	InitFunctionCallInfoData(fcinfo,  // FunctionCallInfoData
 	                         func,    // FmgrInfo
@@ -495,13 +493,53 @@ bool InvokePlugStorageFormatGetNext(FmgrInfo *func,
 }
 
 void InvokePlugStorageFormatReScan(FmgrInfo *func,
-                                   FileScanDesc fileScanDesc)
+                                   FileScanDesc fileScanDesc,
+                                   ScanState* scanState,
+                                   MagmaSnapshot* snapshot,
+                                   IndexRuntimeKeyInfo* runtimeKeyInfo,
+                                   int numRuntimeKeys,
+                                   TupleTableSlot *tupTableSlot)
 {
 	PlugStorageData psdata;
 	FunctionCallInfoData fcinfo;
 
-	psdata.type              = T_PlugStorageData;
-	psdata.ps_file_scan_desc = fileScanDesc;
+	psdata.type                = T_PlugStorageData;
+	psdata.ps_scan_state       = scanState;
+	psdata.ps_file_scan_desc   = fileScanDesc;
+	psdata.runtime_key_info    = runtimeKeyInfo;
+	psdata.num_run_time_keys   = numRuntimeKeys;
+	psdata.ps_tuple_table_slot = tupTableSlot;
+
+        if (strncmp(fileScanDesc->fs_formatter_name, "magma", strlen("magma")) == 0)
+        {
+                Insist(snapshot != NULL);
+
+                // save current transaction in snapshot
+                psdata.ps_snapshot.currentTransaction.txnId =
+                    snapshot->currentTransaction.txnId;
+                psdata.ps_snapshot.currentTransaction.txnStatus =
+                    snapshot->currentTransaction.txnStatus;
+
+                psdata.ps_snapshot.cmdIdInTransaction = snapshot->cmdIdInTransaction;
+
+                // allocate txnActions
+                psdata.ps_snapshot.txnActions.txnActionStartOffset =
+                    snapshot->txnActions.txnActionStartOffset;
+                psdata.ps_snapshot.txnActions.txnActions =
+                    (MagmaTxnAction *)palloc0(sizeof(MagmaTxnAction) * snapshot->txnActions
+                        .txnActionSize);
+
+                // save txnActions
+                psdata.ps_snapshot.txnActions.txnActionSize = snapshot->txnActions
+                    .txnActionSize;
+                for (int i = 0; i < snapshot->txnActions.txnActionSize; ++i)
+                {
+                        psdata.ps_snapshot.txnActions.txnActions[i].txnId =
+                              snapshot->txnActions.txnActions[i].txnId;
+                        psdata.ps_snapshot.txnActions.txnActions[i].txnStatus =
+                              snapshot->txnActions.txnActions[i].txnStatus;
+                }
+        }
 
 	InitFunctionCallInfoData(fcinfo,  // FunctionCallInfoData
 	                         func,    // FmgrInfo
@@ -512,6 +550,12 @@ void InvokePlugStorageFormatReScan(FmgrInfo *func,
 	// Invoke function
 	FunctionCallInvoke(&fcinfo);
 
+	// free memory for magma snapshot
+	if (strncmp(fileScanDesc->fs_formatter_name, "magma", strlen("magma")) == 0)
+	{
+	        pfree(psdata.ps_snapshot.txnActions.txnActions);
+	}
+
 	// We do not expect a null result
 	if (fcinfo.isnull)
 	{
diff --git a/src/backend/access/index/catquery.c b/src/backend/access/index/catquery.c
index 5c8b44e..f4b8e36 100644
--- a/src/backend/access/index/catquery.c
+++ b/src/backend/access/index/catquery.c
@@ -124,15 +124,21 @@ ARGV: \-meta\ \/Users\/jianl\/Git\/hawq\/mn\/cdb\-pg\/\.\.\/gpMgmt\/bin\/gppylib
 #include "catalog/gp_configuration.h"
 #include "catalog/gp_segment_config.h"
 #include "catalog/gp_san_config.h"
-
 #include "catalog/gp_fastsequence.h"
-
 #include "catalog/gp_master_mirroring.h"
 #include "catalog/gp_persistent.h"
 #include "catalog/gp_global_sequence.h"
 #include "catalog/gp_version.h"
-#include "catalog/toasting.h"
 #include "catalog/gp_policy.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "catalog/toasting.h"
 
 #include "miscadmin.h"
 #include "storage/fd.h"
diff --git a/src/backend/access/index/gperf.init b/src/backend/access/index/gperf.init
index da33b47..60e30ec 100644
--- a/src/backend/access/index/gperf.init
+++ b/src/backend/access/index/gperf.init
@@ -100,8 +100,16 @@ ARGV: \-meta\ \/Users\/yjin\/hawq_main\/gpMgmt\/bin\/gppylib\/data\/2\.0\.json\
 #include "catalog/gp_persistent.h"
 #include "catalog/gp_global_sequence.h"
 #include "catalog/gp_version.h"
-#include "catalog/toasting.h"
 #include "catalog/gp_policy.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "catalog/toasting.h"
 
 #include "miscadmin.h"
 #include "storage/fd.h"
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index d01fcba..545ea06 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -725,7 +725,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 									   InvalidStrategy,
 									   cur->sk_subtype,
 									   cmp_proc,
-									   cur->sk_argument);
+									   cur->sk_argument,
+									   InvalidAttrNumber,
+									   InvalidOid);
 			}
 		}
 	}
diff --git a/src/backend/access/orc/orcam.c b/src/backend/access/orc/orcam.c
index cc4893b..f226844 100644
--- a/src/backend/access/orc/orcam.c
+++ b/src/backend/access/orc/orcam.c
@@ -32,6 +32,7 @@
 #include "executor/executor.h"
 #include "miscadmin.h"
 #include "nodes/execnodes.h"
+#include "nodes/plannodes.h"
 #include "optimizer/newPlanner.h"
 #include "storage/cwrapper/hdfs-file-system-c.h"
 #include "storage/cwrapper/orc-format-c.h"
@@ -77,6 +78,54 @@ typedef struct OrcFormatData {
   struct varlena **colFixedLenUDT;
 } OrcFormatData;
 
+static void initOrcFormatIndexUserData(TupleDesc tup_desc,
+                                       OrcFormatData *orcFormatData,
+                                       bool *colToReads, List *columnsInIndex,
+                                       bool *colToReadInIndex) {
+  int natts = list_length(columnsInIndex) + 1;
+  orcFormatData->numberOfColumns = natts;
+  orcFormatData->colNames = palloc0(sizeof(char *) * natts);
+  orcFormatData->colDatatypes = palloc0(sizeof(int) * natts);
+  orcFormatData->colDatatypeMods = palloc0(sizeof(uint64) * natts);
+  orcFormatData->colRawValues = palloc0(sizeof(char *) * natts);
+  orcFormatData->colValLength = palloc0(sizeof(uint64) * natts);
+  orcFormatData->colTimestamp = palloc0(sizeof(TimestampType) * natts);
+  orcFormatData->colFixedLenUDT = palloc0(sizeof(struct varlena *) * natts);
+  int count = 0;
+  for (int i = 0; i < tup_desc->natts; ++i) {
+    for (int j = 0; j < list_length(columnsInIndex); j++) {
+      if ((int)list_nth_oid(columnsInIndex, j) - 1 == i) {
+        // allocate memory for colFixedLenUDT[i] of fixed-length type in advance
+        bool isFixedLengthType = tup_desc->attrs[i]->attlen > 0 ? true : false;
+        if (isFixedLengthType) {
+          orcFormatData->colFixedLenUDT[count] = (struct varlena *)palloc0(
+              tup_desc->attrs[i]->attlen + sizeof(uint32_t));
+        }
+        orcFormatData->colNames[count] = palloc0(NAMEDATALEN);
+        strcpy(orcFormatData->colNames[count],
+               tup_desc->attrs[i]->attname.data);
+        orcFormatData->colDatatypes[count] =
+            map_hawq_type_to_common_plan((int)(tup_desc->attrs[i]->atttypid));
+        orcFormatData->colDatatypeMods[count] = tup_desc->attrs[i]->atttypmod;
+        if (orcFormatData->colDatatypes[count] == CHARID &&
+            tup_desc->attrs[i]->atttypmod == -1) {
+          // XXX(chiyang): From orc.c to determine BPCHAR's typemod
+          orcFormatData->colDatatypeMods[count] =
+              strlen(tup_desc->attrs[i]->attname.data) + VARHDRSZ;
+        }
+        if (colToReads[i]) {
+          colToReadInIndex[count] = true;
+        }
+        count++;
+      }
+    }
+  }
+  orcFormatData->colNames[count] = palloc0(NAMEDATALEN);
+  strcpy(orcFormatData->colNames[count], "tid");
+  orcFormatData->colDatatypes[count] = BIGINTID;
+  orcFormatData->colDatatypeMods[count] = -1;
+}
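+
+/*
+ * Layout sketch (illustrative): for a table (a int, b text, c float8)
+ * indexed on columns {c, a}, the arrays built above hold the indexed
+ * columns in table attribute order followed by the implicit "tid" column:
+ *   colNames        = { "a", "c", "tid" }
+ *   numberOfColumns = 3, i.e. list_length(columnsInIndex) + 1
+ * colToReadInIndex[k] is set only when the matching table column is
+ * requested in colToReads.
+ */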
+
 static void initOrcFormatUserData(TupleDesc tup_desc,
                                   OrcFormatData *orcFormatData) {
   int natts = tup_desc->natts;
@@ -128,7 +177,7 @@ static freeOrcFormatUserData(OrcFormatData *orcFormatData) {
   pfree(orcFormatData->colNames);
 }
 
-static void checkOrcError(OrcFormatData *orcFormatData) {
+void checkOrcError(OrcFormatData *orcFormatData) {
   ORCFormatCatchedError *e = ORCFormatGetErrorORCFormatC(orcFormatData->fmt);
   if (e->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
     ORCFormatCatchedError errBuf = *e;
@@ -152,6 +201,85 @@ static void addFilesystemCredential(const char *uri) {
   }
 }
 
+void constructOrcFormatOptionString(StringInfoData *option, Relation rel,
+                                    ResultRelSegFileInfo *segfileinfo,
+                                    AppendOnlyEntry *aoentry) {
+  initStringInfo(option);
+  appendStringInfoChar(option, '{');
+
+  // segfileinfo is NULL for UPDATE/DELETE, so the EOF options are omitted
+  if (segfileinfo) {
+    appendStringInfo(option, "\"logicEof\": %" PRId64, segfileinfo->eof[0]);
+    appendStringInfo(option, ", \"uncompressedEof\": %lld, ",
+                     segfileinfo->uncompressed_eof[0]);
+  }
+
+  appendStringInfo(
+      option, "\"stripeSize\": %" PRId64,
+      ((StdRdOptions *)(rel->rd_options))->stripesize * 1024 * 1024);
+  appendStringInfo(option, ", \"rowIndexStride\": %" PRId64,
+                   ((StdRdOptions *)(rel->rd_options))->rowindexstride);
+  appendStringInfo(option, ", \"blockSize\": %" PRId64,
+                   ((StdRdOptions *)(rel->rd_options))->compressblocksize);
+  if (aoentry->compresstype)
+    appendStringInfo(option, ", %s", aoentry->compresstype);
+
+  // transform bloomfilter option from column names to column indexes.
+  if (((StdRdOptions *)(rel->rd_options))->bloomfilter) {
+    TupleDesc tupDesc = rel->rd_att;
+    int attrNum = tupDesc->natts;
+    char **attrNames = palloc0(attrNum * sizeof(char *));
+    for (int i = 0; i < attrNum; ++i) {
+      int nameLen =
+          strlen(((Form_pg_attribute)(tupDesc->attrs[i]))->attname.data);
+      char *attribute = palloc0(nameLen + 1);
+      strncpy(attribute, ((Form_pg_attribute)(tupDesc->attrs[i]))->attname.data,
+              nameLen);
+      attrNames[i] = attribute;
+    }
+
+    StringInfoData bloomFilterOptsStr;
+    initStringInfo(&bloomFilterOptsStr);
+    char *token = strtok(((StdRdOptions *)(rel->rd_options))->bloomfilter, ",");
+    while (token) {
+      for (int attrIdx = 0; attrIdx < attrNum; ++attrIdx) {
+        if (strncasecmp(token, attrNames[attrIdx],
+                        strlen(attrNames[attrIdx])) == 0) {
+          appendStringInfo(&bloomFilterOptsStr, "%d", attrIdx + 1);
+          appendStringInfoChar(&bloomFilterOptsStr, ',');
+          break;
+        }
+      }
+      token = strtok(NULL, ",");
+    }
+    if (bloomFilterOptsStr.len > 0 &&
+        bloomFilterOptsStr.data[bloomFilterOptsStr.len - 1] == ',')
+      bloomFilterOptsStr.data[bloomFilterOptsStr.len - 1] = '\0';
+    appendStringInfo(option, ", \"bloomfilter\": [%s]",
+                     pstrdup(bloomFilterOptsStr.data));
+  }
+
+  appendStringInfoChar(option, '}');
+}
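+
+/*
+ * Example of the options string built above (illustrative values): for an
+ * insert with segment file info and bloomfilter = 'a,c' mapping to columns
+ * 1 and 3, the result looks like
+ *   {"logicEof": 1048576, "uncompressedEof": 2097152, "stripeSize": 67108864,
+ *    "rowIndexStride": 10000, "blockSize": 65536, "bloomfilter": [1,3]}
+ * The compresstype entry, when present, is appended verbatim between
+ * blockSize and bloomfilter.
+ */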
+
+// FIXME(sxwang): In fact, upstream logic should ensure that all FileSplits
+// are valid.
+static bool IsValidFileSplit(FileSplit split, Oid idxId) {
+  return split->replicaGroup_id == idxId;
+}
+
+// Filter invalid fileSplit.
+static int32 GetSplitCount(List *fileSplits, Oid idxId) {
+  int32 splitCount = list_length(fileSplits);
+  int32 ret = 0;
+  for (int32 i = 0; i < splitCount; ++i) {
+    if (IsValidFileSplit(list_nth(fileSplits, i), idxId)) {
+      ++ret;
+    }
+  }
+  return ret;
+}
+
 OrcInsertDescData *orcBeginInsert(Relation rel,
                                   ResultRelSegFileInfo *segfileinfo) {
   OrcInsertDescData *insertDesc =
@@ -367,6 +495,14 @@ void orcReScan(ScanState *scanState) {
 OrcScanDescData *orcBeginRead(Relation rel, Snapshot snapshot, TupleDesc desc,
                               List *fileSplits, bool *colToReads,
                               void *pushDown) {
+  return orcBeginReadWithOptionsStr(rel, snapshot, desc, fileSplits, colToReads,
+                                    pushDown, "{}");
+}
+
+OrcScanDescData *orcBeginReadWithOptionsStr(Relation rel, Snapshot snapshot,
+                                            TupleDesc desc, List *fileSplits,
+                                            bool *colToReads, void *pushDown,
+                                            const char *optsStr) {
   OrcScanDescData *scanDesc = palloc0(sizeof(OrcScanDescData));
   OrcFormatData *orcFormatData = scanDesc->orcFormatData =
       palloc0(sizeof(OrcFormatData));
@@ -377,20 +513,30 @@ OrcScanDescData *orcBeginRead(Relation rel, Snapshot snapshot, TupleDesc desc,
     desc = RelationGetDescr(rel);
 
   scanDesc->rel = rel;
-  orcFormatData->fmt = ORCFormatNewORCFormatC("{}", 0);
+  orcFormatData->fmt = ORCFormatNewORCFormatC(optsStr, 0);
   initOrcFormatUserData(desc, orcFormatData);
 
-  int32 splitCount = list_length(fileSplits);
+  int32 splitCount = GetSplitCount(fileSplits, InvalidOid);
   ORCFormatFileSplit *splits = palloc0(sizeof(ORCFormatFileSplit) * splitCount);
   int32 filePathMaxLen = AOSegmentFilePathNameLen(rel) + 1;
-  for (int32 i = 0; i < splitCount; ++i) {
+  int32 orgSplitCount = list_length(fileSplits);
+  for (int32 i = 0, j = 0; i < orgSplitCount; ++i) {
     FileSplit split = (FileSplitNode *)list_nth(fileSplits, i);
-    splits[i].start = split->offsets;
-    splits[i].len = split->lengths;
-    splits[i].eof = split->logiceof;
-    splits[i].fileName = palloc0(filePathMaxLen);
-    MakeAOSegmentFileName(rel, split->segno, -1, dummyPlaceholder,
-                          splits[i].fileName);
+    if (IsValidFileSplit(split, InvalidOid)) {
+      splits[j].start = split->offsets;
+      splits[j].len = split->lengths;
+      splits[j].eof = split->logiceof;
+
+      if (split->ext_file_uri_string) {
+        // XXX(chiyang): hack way to manage split info manually
+        splits[j].fileName = split->ext_file_uri_string;
+      } else {
+        splits[j].fileName = palloc0(filePathMaxLen);
+        MakeAOSegmentFileName(rel, split->segno, -1, dummyPlaceholder,
+                              splits[j].fileName);
+      }
+      ++j;
+    }
   }
 
   if (splitCount > 0)
@@ -419,6 +565,108 @@ OrcScanDescData *orcBeginRead(Relation rel, Snapshot snapshot, TupleDesc desc,
   return scanDesc;
 }
 
+void orcIndexReadNext(OrcScanDescData *scanData, TupleTableSlot *slot,
+                      List *columnsInIndex) {
+  OrcFormatData *orcFormatData = scanData->orcFormatData;
+  bool *nulls = slot_get_isnull(slot);
+  bool *idxnulls = palloc0(sizeof(bool) * orcFormatData->numberOfColumns);
+  memset(idxnulls, true, orcFormatData->numberOfColumns);
+  Datum *values = slot_get_values(slot);
+  TupleDesc tupleDesc = slot->tts_tupleDescriptor;
+  int natts = tupleDesc->natts;
+  memset(nulls, true, natts);
+
+  uint64_t rowId;
+  bool res = ORCFormatNextORCFormatWithRowIdC(
+      orcFormatData->fmt, orcFormatData->colRawValues,
+      orcFormatData->colValLength, idxnulls, &rowId);
+
+  checkOrcError(orcFormatData);
+  int idx = 0;
+  if (res) {
+    for (int32_t i = 0; i < natts; ++i) {
+      // can't find the column in columnsInIndex
+      if (!list_member_oid(columnsInIndex, i + 1)) continue;
+      // index data is null
+      if (idxnulls[idx]) {
+        idx++;
+        continue;
+      }
+      nulls[i] = false;
+      switch (tupleDesc->attrs[i]->atttypid) {
+        case HAWQ_TYPE_BOOL: {
+          values[i] = BoolGetDatum(*(bool *)(orcFormatData->colRawValues[idx]));
+          break;
+        }
+        case HAWQ_TYPE_INT2: {
+          values[i] =
+              Int16GetDatum(*(int16_t *)(orcFormatData->colRawValues[idx]));
+          break;
+        }
+        case HAWQ_TYPE_INT4: {
+          values[i] =
+              Int32GetDatum(*(int32_t *)(orcFormatData->colRawValues[idx]));
+          break;
+        }
+        case HAWQ_TYPE_INT8:
+        case HAWQ_TYPE_TIME:
+        case HAWQ_TYPE_TIMESTAMP:
+        case HAWQ_TYPE_TIMESTAMPTZ: {
+          values[i] =
+              Int64GetDatum(*(int64_t *)(orcFormatData->colRawValues[idx]));
+          break;
+        }
+        case HAWQ_TYPE_FLOAT4: {
+          values[i] =
+              Float4GetDatum(*(float *)(orcFormatData->colRawValues[idx]));
+          break;
+        }
+        case HAWQ_TYPE_FLOAT8: {
+          values[i] =
+              Float8GetDatum(*(double *)(orcFormatData->colRawValues[idx]));
+          break;
+        }
+        case HAWQ_TYPE_DATE: {
+          values[i] =
+              Int32GetDatum(*(int32_t *)(orcFormatData->colRawValues[idx]) -
+                            POSTGRES_EPOCH_JDATE + UNIX_EPOCH_JDATE);
+          break;
+        }
+        default: {
+          // Check whether value[i] is fixed length udt.
+          bool isFixedLengthType =
+              tupleDesc->attrs[i]->attlen > 0 ? true : false;
+          bool isPassByVal = tupleDesc->attrs[i]->attbyval;
+          if (isFixedLengthType) {
+            if (isPassByVal) {  // pass by val
+              struct varlena *var =
+                  (struct varlena *)(orcFormatData->colRawValues[idx]);
+              uint32 valLen = *(uint32 *)(var->vl_len_);
+              memcpy((void *)&values[i], var->vl_dat, valLen);
+            } else {  // pass by pointer
+              SET_VARSIZE((struct varlena *)(orcFormatData->colRawValues[idx]),
+                          orcFormatData->colValLength[idx]);
+              values[i] = PointerGetDatum(orcFormatData->colRawValues[idx] +
+                                          sizeof(uint32_t));
+            }
+          } else {
+            SET_VARSIZE((struct varlena *)(orcFormatData->colRawValues[idx]),
+                        orcFormatData->colValLength[idx]);
+            values[i] = PointerGetDatum(orcFormatData->colRawValues[idx]);
+          }
+          break;
+        }
+      }
+      idx++;
+    }
+    TupSetVirtualTupleNValid(slot, slot->tts_tupleDescriptor->natts);
+    ItemPointerSetRowIdToFakeCtid(&scanData->cdb_fake_ctid, rowId);
+    slot_set_ctid(slot, &scanData->cdb_fake_ctid);
+  } else {
+    ExecClearTuple(slot);
+  }
+}
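+
+/*
+ * Behavior sketch (illustrative): with columnsInIndex = {1, 3} on a
+ * three-column table, only values[0] and values[2] are populated from the
+ * index file; the remaining attributes stay NULL in the returned slot. The
+ * row id returned by ORCFormatNextORCFormatWithRowIdC is packed into a fake
+ * ctid so that upper plan nodes can still address the underlying row.
+ */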
+
 void orcReadNext(OrcScanDescData *scanData, TupleTableSlot *slot) {
   OrcFormatData *orcFormatData = scanData->orcFormatData;
   bool *nulls = slot_get_isnull(slot);
@@ -914,7 +1162,7 @@ uint64 orcEndUpdate(OrcUpdateDescData *updateDesc) {
   return callback.processedTupleCount;
 }
 
-int64_t *orcCreateIndex(Relation rel, int idxId, List *segno, int64 *eof,
+int64_t *orcCreateIndex(Relation rel, Oid idxId, List *segno, int64 *eof,
                         List *columnsToRead, int sortIdx) {
   checkOushuDbExtensiveFeatureSupport("ORC INDEX");
   OrcScanDescData *scanDesc = palloc0(sizeof(OrcScanDescData));
@@ -939,6 +1187,10 @@ int64_t *orcCreateIndex(Relation rel, int idxId, List *segno, int64 *eof,
   for (int i = 0; i < sortIdx; i++) {
     sortIdxList[i] = list_nth_int(columnsToRead, i) - 1;
   }
+  int *segnoList = palloc0(sizeof(int) * splitCount);
+  for (int i = 0; i < splitCount; i++) {
+    segnoList[i] = list_nth_int(segno, i);
+  }
 
   ORCFormatFileSplit *splits = palloc0(sizeof(ORCFormatFileSplit) * splitCount);
   int32 filePathMaxLen = AOSegmentFilePathNameLen(rel) + 1;
@@ -954,7 +1206,148 @@ int64_t *orcCreateIndex(Relation rel, int idxId, List *segno, int64 *eof,
       idxId, splits, splitCount, eof, columnsToReadList, sortIdxList, sortIdx,
       orcFormatData->colNames, orcFormatData->colDatatypes,
       orcFormatData->colDatatypeMods, orcFormatData->numberOfColumns,
-      gp_session_id, rm_seg_tmp_dirs);
+      gp_session_id, rm_seg_tmp_dirs, segnoList);
+}
+
+static int OrcoidComparator(const void *arg1, const void *arg2) {
+  Oid oid1 = *(const Oid *)arg1;
+  Oid oid2 = *(const Oid *)arg2;
+
+  if (oid1 > oid2) return 1;
+  if (oid1 < oid2) return -1;
+  return 0;
+}
+
+void orcBeginIndexOnlyScan(ScanState *scanState, Oid idxId,
+                           List *columnsInIndex) {
+  Assert(scanState->scan_state == SCAN_INIT ||
+         scanState->scan_state == SCAN_DONE);
+  Assert(length(columnsInIndex) > 0);
+
+  Relation rel = scanState->ss_currentRelation;
+  int natts = rel->rd_att->natts;
+  bool *colToReads = palloc0(sizeof(bool) * natts);
+  GetNeededColumnsForScan((Node *)scanState->ps.plan->targetlist, colToReads,
+                          natts);
+  OrcIndexOnlyScan *plan =
+      (OrcIndexOnlyScan *)(((IndexScanState *)scanState)->ss.ps.plan);
+  GetNeededColumnsForScan((Node *)(plan->indexqualorig), colToReads, natts);
+
+  GetNeededColumnsForScan((Node *)scanState->ps.plan->qual, colToReads, natts);
+
+  ((IndexScanState *)scanState)->scandesc = orcBeginIndexOnlyRead(
+      rel, idxId, columnsInIndex, scanState->ps.state->es_snapshot, NULL,
+      scanState->splits, colToReads, scanState->ps.plan);
+
+  pfree(colToReads);
+  scanState->scan_state = SCAN_SCAN;
+}
+
+TupleTableSlot *orcIndexOnlyScanNext(ScanState *scanState) {
+  orcIndexReadNext(
+      ((IndexScanState *)scanState)->scandesc, scanState->ss_ScanTupleSlot,
+      ((IndexScan *)(((IndexScanState *)scanState)->ss.ps.plan))->idxColummns);
+  return scanState->ss_ScanTupleSlot;
+}
+
+void orcEndIndexOnlyScan(ScanState *scanState) {
+  if ((((IndexScanState *)scanState)->ss.scan_state & SCAN_SCAN) != 0) {
+    OrcScanDescData *scanDesc = ((IndexScanState *)scanState)->scandesc;
+
+    orcEndRead(scanDesc);
+
+    pfree(scanDesc);
+    scanState->scan_state = SCAN_INIT;
+  }
+}
+
+void orcIndexOnlyReScan(ScanState *scanState) {
+  orcResetRead(((IndexScanState *)scanState)->scandesc);
+}
+
+OrcScanDescData *orcBeginIndexOnlyRead(Relation rel, Oid idxId,
+                                       List *columnsInIndex, Snapshot snapshot,
+                                       TupleDesc desc, List *fileSplits,
+                                       bool *colToReads, void *pushDown) {
+  OrcScanDescData *scanDesc = palloc0(sizeof(OrcScanDescData));
+  OrcFormatData *orcFormatData = scanDesc->orcFormatData =
+      palloc0(sizeof(OrcFormatData));
+
+  RelationIncrementReferenceCount(rel);
+
+  if (desc == NULL) desc = RelationGetDescr(rel);
+
+  scanDesc->rel = rel;
+  orcFormatData->fmt = ORCFormatNewORCFormatC("{}", 0);
+  ORCFormatSetIndexFlag(orcFormatData->fmt);
+  bool *colToReadInIndex =
+      palloc0(sizeof(bool) * (list_length(columnsInIndex) + 1));
+  initOrcFormatIndexUserData(desc, orcFormatData, colToReads, columnsInIndex,
+                             colToReadInIndex);
+
+  int32 splitCount = GetSplitCount(fileSplits, idxId);
+  ORCFormatFileSplit *splits = palloc0(sizeof(ORCFormatFileSplit) * splitCount);
+  int lenOfIdxId = 0;
+  Oid idxCount = idxId;
+  while (idxCount) {
+    lenOfIdxId++;
+    idxCount /= 10;
+  }
+  if (lenOfIdxId == 0) lenOfIdxId = 1;
+  int32 filePathMaxLen = AOSegmentFilePathNameLen(rel) + lenOfIdxId + 2;
+  int32 orgSplitCount = list_length(fileSplits);
+  for (int32 i = 0, j = 0; i < orgSplitCount; ++i) {
+    FileSplit split = (FileSplitNode *)list_nth(fileSplits, i);
+    if (IsValidFileSplit(split, idxId)) {
+      splits[j].start = split->offsets;
+      splits[j].len = split->lengths;
+      splits[j].eof = split->logiceof;
+      splits[j].fileName = palloc0(filePathMaxLen);
+      MakeAOSegmentIndexFileName(rel, idxId, split->segno, -1, dummyPlaceholder,
+                                 splits[j].fileName);
+      ++j;
+    }
+  }
+
+  if (splitCount > 0) addFilesystemCredential(splits[0].fileName);
+
+  void *qualList = NULL;
+  CommonPlanContext ctx;
+  ctx.univplan = NULL;
+  Plan *plan = (Plan *)pushDown;
+
+  /*
+   * The varattno values in indexqualorig follow the table's column
+   * numbering, so they do not match the column layout of the index file.
+   * Sort "columnsInIndex" and pass the sorted list to
+   * do_convert_expr_to_common_plan so that the indexqualorig varattno
+   * values can be remapped to be consistent with the index file.
+   */
+  int len = length(columnsInIndex);
+  Oid *value = palloc(len * sizeof(Oid));
+  for (int i = 0; i < len; ++i) {
+    value[i] = list_nth_oid(columnsInIndex, i);
+  }
+  qsort(value, len, sizeof(Oid), OrcoidComparator);
+  List *colIdxs = NIL;
+  for (int i = 0; i < len; ++i) {
+    colIdxs = lappend_oid(colIdxs, value[i]);
+  }
+  pfree(value);
+  qualList = convert_orcscan_indexqualorig_to_common_plan(plan, &ctx, colIdxs);
+
+  ORCFormatBeginORCFormatC(
+      orcFormatData->fmt, splits, splitCount, colToReadInIndex,
+      orcFormatData->colNames, orcFormatData->colDatatypes,
+      orcFormatData->colDatatypeMods, orcFormatData->numberOfColumns, qualList);
+  checkOrcError(orcFormatData);
+
+  ItemPointerSetInvalid(&scanDesc->cdb_fake_ctid);
+
+  for (int32 i = 0; i < splitCount; ++i) pfree(splits[i].fileName);
+  pfree(splits);
+
+  return scanDesc;
 }
 
 bool isDirectDispatch(Plan *plan) {
diff --git a/src/backend/access/orc/orcsegfiles.c b/src/backend/access/orc/orcsegfiles.c
index 8fea0a8..8a1a6a0 100644
--- a/src/backend/access/orc/orcsegfiles.c
+++ b/src/backend/access/orc/orcsegfiles.c
@@ -21,10 +21,14 @@
 
 #include "access/orcsegfiles.h"
 
+#include "access/aomd.h"
 #include "access/filesplit.h"
 #include "access/genam.h"
+#include "catalog/catalog.h"
 #include "nodes/relation.h"
+#include "cdb/cdbmetadatacache.h"
 #include "cdb/cdbvars.h"
+#include "storage/fd.h"
 #include "utils/builtins.h"
 #include "utils/fmgroids.h"
 
@@ -105,7 +109,31 @@ void insertOrcSegnoEntry(AppendOnlyEntry *aoEntry, int segNo, float8 tupleCount,
   heap_close(segRel, RowExclusiveLock);
 }
 
-void deleteOrcIndexFileInfo(AppendOnlyEntry *aoEntry, int idxOid)
+void deleteOrcIndexHdfsFiles(Relation rel, int32 segmentFileNum, int32 idx)
+{
+  RelFileNode rd_node = rel->rd_node;
+  char *basepath = relpath(rel->rd_node);
+  HdfsFileInfo *file_info;
+  char *path = (char*)palloc(MAXPGPATH + 1);
+
+  FormatAOSegmentIndexFileName(basepath, segmentFileNum, idx,  -1, 0, &segmentFileNum, path);
+
+  RemovePath(path, 0);
+
+  if (!IsLocalPath(path) && Gp_role == GP_ROLE_DISPATCH)
+  {
+    // Remove Hdfs block locations info in Metadata Cache
+    file_info = CreateHdfsFileInfo(rd_node, segmentFileNum);
+    LWLockAcquire(MetadataCacheLock, LW_EXCLUSIVE);
+    RemoveHdfsFileBlockLocations(file_info);
+    LWLockRelease(MetadataCacheLock);
+    DestroyHdfsFileInfo(file_info);
+  }
+
+  pfree(basepath);
+  pfree(path);
+}
+
+void deleteOrcIndexFileInfo(Relation rel, AppendOnlyEntry *aoEntry, int idxOid)
 {
   if (aoEntry->blkdirrelid == 0) return;
   Relation segRel = heap_open(aoEntry->blkdirrelid, RowExclusiveLock);
@@ -118,6 +146,10 @@ void deleteOrcIndexFileInfo(AppendOnlyEntry *aoEntry, int idxOid)
   HeapTuple tuple;
   while ((tuple = systable_getnext(scan)))
   {
+    bool isNull;
+    int segno = DatumGetInt32(fastgetattr(tuple, Anum_pg_orcseg_idx_segno, desc, &isNull));
+    /* delete hdfs index files */
+    deleteOrcIndexHdfsFiles(rel, segno, idxOid);
+    /* delete catalog info */
     simple_heap_delete(segRel, &tuple->t_self);
   }
 
diff --git a/src/backend/bootstrap/bootparse.y b/src/backend/bootstrap/bootparse.y
index 94be0d3..21dc3e4 100755
--- a/src/backend/bootstrap/bootparse.y
+++ b/src/backend/bootstrap/bootparse.y
@@ -67,6 +67,14 @@
 #include "catalog/pg_type.h"
 #include "catalog/pg_user_mapping.h"
 #include "catalog/pg_tidycat.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
 #include "catalog/toasting.h"
 #include "commands/defrem.h"
 #include "miscadmin.h"
@@ -320,6 +328,36 @@ Boot_CreateStmt:
 							case GpConfigHistoryRelationId:
 								typid = GP_CONFIGURATION_HISTORY_RELTYPE_OID;
 								break;
+/* relation id: 4850 - skylon_vlabel 20190603 */
+							case VlabelRelationId:
+								typid = SKYLON_VLABEL_RELTYPE_OID;
+								break;
+/* relation id: 4851 - skylon_elabel 20200224 */
+							case ElabelRelationId:
+								typid = SKYLON_ELABEL_RELTYPE_OID;
+								break;
+/* relation id: 4852 - skylon_vlabel_attribute 20200224 */
+							case VlabelAttrRelationId:
+								typid = SKYLON_VLABEL_ATTRIBUTE_RELTYPE_OID;
+								break;
+/* relation id: 4853 - skylon_elabel_attribute 20200224 */
+							case ElabelAttrRelationId:
+								typid = SKYLON_ELABEL_ATTRIBUTE_RELTYPE_OID;
+								break;
+/* relation id: 4854 - skylon_graph_vlabel 20200224 */
+							case GraphVlabelRelationId:
+								typid = SKYLON_GRAPH_VLABEL_RELTYPE_OID;
+								break;
+/* relation id: 4855 - skylon_graph_elabel 20200224 */
+							case GraphElabelRelationId:
+								typid = SKYLON_GRAPH_ELABEL_RELTYPE_OID;
+								break;
+/* relation id: 4856 - skylon_graph 20200224 */
+							case GraphRelationId:
+								typid = SKYLON_GRAPH_RELTYPE_OID;
+								break;
+/* relation id: 4857 - skylon_index 20200821 */
+							case SkylonIndexRelationId:
+								typid = SKYLON_INDEX_RELTYPE_OID;
+								break;
 /* relation id: 5029 - gp_db_interfaces 20101104 */
 							case GpDbInterfacesRelationId:
 								typid = GP_DB_INTERFACES_RELTYPE_OID;
diff --git a/src/backend/catalog/Makefile b/src/backend/catalog/Makefile
index f8302e9..24c8bdc 100644
--- a/src/backend/catalog/Makefile
+++ b/src/backend/catalog/Makefile
@@ -16,6 +16,7 @@ OBJS = catalog.o dependency.o heap.o index.o indexing.o namespace.o aclchk.o \
        pg_exttable.o pg_extprotocol.o pg_filesystem.o pg_largeobject.o pg_namespace.o \
        pg_operator.o pg_proc.o pg_proc_callback.o pg_shdepend.o \
        pg_type.o toasting.o aoseg.o \
+       skylon_vlabel_attribute.o skylon_vlabel.o skylon_elabel_attribute.o skylon_elabel.o skylon_graph.o skylon_graph_vlabel.o skylon_graph_elabel.o skylon_index.o \
        pg_attribute_encoding.o pg_compression.o quicklz_compression.o
 
 SUBDIRS = caql core external
@@ -108,6 +109,7 @@ POSTGRES_BKI_SRCS := $(addprefix $(top_srcdir)/src/include/catalog/,\
 	pg_database.h pg_tablespace.h pg_pltemplate.h \
 	pg_authid.h pg_auth_members.h pg_shdepend.h pg_shdescription.h pg_resqueue.h \
 	gp_configuration.h gp_policy.h gp_version.h \
+	skylon_vlabel.h skylon_elabel.h skylon_vlabel_attribute.h skylon_elabel_attribute.h skylon_graph_vlabel.h skylon_graph_elabel.h skylon_graph.h skylon_index.h \
 	gp_segment_config.h gp_san_config.h \
 	gp_verification_history.h \
 	pg_window.h \
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index d8e4b22..062b624 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -43,6 +43,8 @@
 #include "catalog/pg_filespace.h"
 #include "catalog/pg_filesystem.h"
 #include "catalog/pg_type.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
 #include "cdb/cdbpartition.h"
 #include "commands/dbcommands.h"
 #include "foreign/foreign.h"
@@ -84,6 +86,9 @@ static AclMode pg_aclmask(AclObjectKind objkind, Oid table_oid, Oid roleid,
 
 static bool is_sequence(Oid object_oid);
 
+extern char *graphVertexTableName(char *gname,char *vname);
+
+extern char *graphEdgeTableName(char *gname,char *ename);
 
 #ifdef ACLDEBUG
 static void
@@ -635,6 +640,76 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
 
 				relOid = RangeVarGetRelid(relvar, false, false /*allowHcatalog*/);
 				objects = lappend_oid(objects, relOid);
+
+				/*
+				 * If relvar represents a graph, then we add all vertex and edge
+				 * tables into objnames so that someone who is granted
+				 * privileges on the graph is also granted access to its
+				 * vertices and edges.
+				 */
+
+				if (0 != caql_getcount(NULL,
+							cql("SELECT COUNT(*) FROM skylon_graph "
+								" WHERE graphname = :1 ",
+								CStringGetDatum(relvar->relname)))) {
+					cqContext cqc;
+					cqContext *pcqCtx;
+					Relation relation;
+					HeapTuple tuple;
+					Form_skylon_graph_elabel formElabel;
+					Form_skylon_graph_vlabel formVlabel;
+					Oid relid;
+					char *elabelTableName, *vlabelTableName;
+
+					/*
+					 * Try to get all edge tables.
+					 */
+					relation = heap_open(GraphElabelRelationId, AccessShareLock);
+
+					pcqCtx = caql_beginscan(
+						caql_addrel(cqclr(&cqc), relation),
+						cql("SELECT * FROM skylon_graph_elabel"
+							" WHERE graphname = :1",
+							CStringGetDatum(relvar->relname)));
+					while (HeapTupleIsValid(tuple = caql_getnext(pcqCtx))) {
+						formElabel = (Form_skylon_graph_elabel) GETSTRUCT(tuple);
+						elabelTableName = graphEdgeTableName(relvar->relname,
+																&(NameStr(formElabel->elabelname)[0]));
+						relid = RelnameGetRelid(elabelTableName);
+						pfree(elabelTableName);
+
+						if (OidIsValid(relid)) {
+							objects = lappend_oid(objects, relid);
+						}
+					}
+					caql_endscan(pcqCtx);
+					heap_close(relation, AccessShareLock);
+
+					/*
+					 * Try to get all vertex tables.
+					 */
+					relation = heap_open(GraphVlabelRelationId, AccessShareLock);
+
+					pcqCtx = caql_beginscan(
+						caql_addrel(cqclr(&cqc), relation),
+						cql("SELECT * FROM skylon_graph_vlabel"
+							" WHERE graphname = :1",
+							CStringGetDatum(relvar->relname)));
+
+					while (HeapTupleIsValid(tuple = caql_getnext(pcqCtx))) {
+						formVlabel = (Form_skylon_graph_vlabel) GETSTRUCT(tuple);
+						vlabelTableName = graphVertexTableName(relvar->relname,
+																&(NameStr(formVlabel->vlabelname)[0]));
+						relid = RelnameGetRelid(vlabelTableName);
+						pfree(vlabelTableName);
+
+						if (OidIsValid(relid)) {
+							objects = lappend_oid(objects, relid);
+						}
+					}
+					caql_endscan(pcqCtx);
+					heap_close(relation, AccessShareLock);
+				}
 			}
 			break;
 		case ACL_OBJECT_DATABASE:
@@ -2197,7 +2272,9 @@ static const char *const no_priv_msg[MAX_ACL_KIND] =
 	/* ACL_KIND_FOREIGN_SERVER */
 	gettext_noop("permission denied for foreign server %s"),
 	/* ACL_KIND_EXTPROTOCOL */
-	gettext_noop("permission denied for external protocol %s")	
+	gettext_noop("permission denied for external protocol %s"),
+	/* ACL_CLASS_GRAPH */
+	gettext_noop("permission denied for graph %s")
 };
 
 static const char *const not_owner_msg[MAX_ACL_KIND] =
@@ -2233,7 +2310,9 @@ static const char *const not_owner_msg[MAX_ACL_KIND] =
 	/* ACL_KIND_FOREIGN_SERVER */
 	gettext_noop("must be owner of foreign server %s"),
 	/* ACL_KIND_EXTPROTOCOL */
-	gettext_noop("must be owner of external protocol %s")
+	gettext_noop("must be owner of external protocol %s"),
+	/* ACL_CLASS_GRAPH */
+	gettext_noop("must be owner of graph %s")
 };
 
 
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index c7b1c47..0b97f95 100755
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -14,7 +14,6 @@
  *
  *-------------------------------------------------------------------------
  */
-
 #include "postgres.h"
 
 #include <fcntl.h>
@@ -51,6 +50,15 @@
 #include "catalog/toasting.h"
 #include "catalog/gp_policy.h"
 
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+
 #include "miscadmin.h"
 #include "storage/fd.h"
 #include "utils/fmgroids.h"
@@ -826,6 +834,20 @@ relationId == GpSanConfigRelationId ||
 relationId == GpConfigurationRelationId || 
 /* relation id: 5006 - gp_configuration_history 20101104 */
 relationId == GpConfigHistoryRelationId || 
+///* relation id: 4850 - pg_vlabel 20200224*/
+//relationId == VlabelRelationId ||
+///* relation id: 4851 - pg_elabel 20200224*/
+//relationId == ElabelRelationId ||
+///* relation id: 4852 - pg_vlabel_attribute 20200224*/
+//relationId == VlabelAttrRelationId ||
+///* relation id: 4853 - pg_elabel_attribute 20200224*/
+//relationId == ElabelAttrRelationId ||
+///* relation id: 4854 - pg_graph_vlabel 20200224*/
+//relationId == GraphVlabelRelationId ||
+///* relation id: 4855 - pg_graph_elabel 20200224*/
+//relationId == GraphElabelRelationId ||
+///* relation id: 4856 - pg_graph 20200224*/
+//relationId == GraphRelationId ||
 /* relation id: 5029 - gp_db_interfaces 20101104 */
 relationId == GpDbInterfacesRelationId || 
 /* relation id: 5030 - gp_interfaces 20101104 */
diff --git a/src/backend/catalog/core/catcoregen.py b/src/backend/catalog/core/catcoregen.py
index 327b560..0f05d3d 100644
--- a/src/backend/catalog/core/catcoregen.py
+++ b/src/backend/catalog/core/catcoregen.py
@@ -420,6 +420,14 @@ CatCoreTableTemplate = """
 #include "catalog/catcore.h"
 #include "catalog/catalog.h"
 #include "catalog/gp_configuration.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
 #include "catalog/gp_master_mirroring.h"
 #include "catalog/gp_policy.h"
 #include "catalog/gp_san_config.h"
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index f70081a..606af47 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -50,6 +50,14 @@
 #include "catalog/pg_type.h"
 #include "catalog/pg_type_encoding.h"
 #include "catalog/pg_user_mapping.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
 #include "cdb/cdbpartition.h"
 #include "commands/comment.h"
 #include "commands/dbcommands.h"
@@ -1070,6 +1078,18 @@ doDeletion(const ObjectAddress *object)
 			elog(NOTICE, "dependency: not yet implemented!");
 			break;
 			
+		case OCLASS_GRAPH:
+			RemoveGraphByOid(object->objectId, false);
+			break;
+
+		case OCLASS_VLABEL:
+			RemoveVlabelByOid(object->objectId);
+			break;
+
+		case OCLASS_ELABEL:
+			RemoveElabelByOid(object->objectId);
+			break;
+
 		default:
 			elog(ERROR, "unrecognized object class: %u",
 				 object->classId);
@@ -1796,6 +1816,18 @@ getObjectClass(const ObjectAddress *object)
 		case CompressionRelationId:
 			Assert(object->objectSubId == 0);
 			return OCLASS_COMPRESSION;
+
+    case GraphRelationId:
+      Assert(object->objectSubId == 0);
+      return OCLASS_GRAPH;
+
+    case VlabelRelationId:
+      Assert(object->objectSubId == 0);
+      return OCLASS_VLABEL;
+
+    case ElabelRelationId:
+      Assert(object->objectSubId == 0);
+      return OCLASS_ELABEL;
 	}
 
 	/* shouldn't get here */
@@ -2197,6 +2229,24 @@ getObjectDescription(const ObjectAddress *object)
 				elog(NOTICE, "NOT YET IMPLEMENTED");
 				break;
 			}
+    case OCLASS_GRAPH:
+      {
+        appendStringInfo(&buffer, _("graph %s"),
+                         RelidGetName(object->objectId));
+        break;
+      }
+    case OCLASS_VLABEL:
+      {
+        appendStringInfo(&buffer, _("vertex %s"),
+                         RelidGetName(object->objectId));
+        break;
+      }
+    case OCLASS_ELABEL:
+      {
+        appendStringInfo(&buffer, _("edge %s"),
+                         RelidGetName(object->objectId));
+        break;
+      }
 		default:
 			appendStringInfo(&buffer, "unrecognized object %u %u %d",
 							 object->classId,
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 2966982..d32d360 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1254,10 +1254,9 @@ index_drop(Oid indexId)
 						ObjectIdGetDatum(aoEntry->blkdirrelid))))
 		{
 			Assert(aoEntry != NULL);
-			deleteOrcIndexFileInfo(aoEntry, indexId);
+			deleteOrcIndexFileInfo(userHeapRelation, aoEntry, indexId);
 			pfree(aoEntry);
 		}
-		/* todo: need to dispatch drop index to clean index data */
 	}
 
 	/*
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 98ef039..df3441e 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -34,6 +34,7 @@
 #include "catalog/pg_operator.h"
 #include "catalog/pg_proc.h"
 #include "catalog/pg_type.h"
+#include "catalog/skylon_graph.h"
 #include "commands/dbcommands.h"
 #include "commands/schemacmds.h"
 #include "miscadmin.h"
@@ -226,6 +227,54 @@ bool RelationExists(const RangeVar *relation, Oid dboid)
 	return OidIsValid(namespaceId) && OidIsValid(relId);
 }
 
+char *RelidGetName(Oid relid) {
+  Relation pgclassrel = heap_open(RelationRelationId, RowExclusiveLock);
+  cqContext cqctmp;
+  HeapTuple pgclasstup = caql_getfirst(
+      caql_addrel(cqclr(&cqctmp), pgclassrel),
+      cql("SELECT * FROM pg_class "
+          " WHERE oid = :1 ",
+          ObjectIdGetDatum(relid)));
+  if (!HeapTupleIsValid(pgclasstup)) {
+    heap_close(pgclassrel, RowExclusiveLock);
+    return NULL;
+  }
+  Form_pg_class pgclassForm = (Form_pg_class) GETSTRUCT(pgclasstup);
+
+  char *name = pstrdup(NameStr(pgclassForm->relname));
+
+  heap_close(pgclassrel, RowExclusiveLock);
+  heap_freetuple(pgclasstup);
+  return name;
+}
+
+RangeVar *RelidGetRangeVar(Oid relid) {
+  Relation pgclassrel = heap_open(RelationRelationId, RowExclusiveLock);
+  cqContext cqctmp;
+  HeapTuple pgclasstup = caql_getfirst(
+      caql_addrel(cqclr(&cqctmp), pgclassrel),
+      cql("SELECT * FROM pg_class "
+          " WHERE oid = :1 ",
+          ObjectIdGetDatum(relid)));
+  Form_pg_class pgclassForm = (Form_pg_class) GETSTRUCT(pgclasstup);
+  Relation pgnmrel = heap_open(NamespaceRelationId, RowExclusiveLock);
+  HeapTuple pgnmtup = caql_getfirst(
+      caql_addrel(cqclr(&cqctmp), pgnmrel),
+      cql("SELECT * FROM pg_namespace "
+          " WHERE oid = :1 ",
+          ObjectIdGetDatum(pgclassForm->relnamespace)));
+  Form_pg_namespace pgnmForm = (Form_pg_namespace) GETSTRUCT(pgnmtup);
+
+  RangeVar *rangeVar = makeRangeVar(NULL,
+                                    pstrdup(NameStr(pgnmForm->nspname)),
+                                    pstrdup(NameStr(pgclassForm->relname)),
+                                    -1);
+
+  heap_close(pgnmrel, RowExclusiveLock);
+  heap_freetuple(pgnmtup);
+  heap_close(pgclassrel, RowExclusiveLock);
+  heap_freetuple(pgclasstup);
+  return rangeVar;
+}
+
 /*
  * RangeVarGetRelid
  *		Given a RangeVar describing an existing relation,
@@ -2003,6 +2052,28 @@ FindDefaultConversionProc(int4 for_encoding, int4 to_encoding)
 	return InvalidOid;
 }
 
+char* findGraphSchema(char *graph) {
+  char *schema = NULL;
+  recomputeNamespacePath();
+  ListCell *l;
+  cqContext cqc;
+  Relation skylon_graph_rel = heap_open(GraphRelationId, RowExclusiveLock);
+  foreach(l, namespaceSearchPath)
+  {
+    Oid namespaceId = lfirst_oid(l);
+    char *thisschema = get_namespace_name(namespaceId);
+    if (0 < caql_getcount(
+          caql_addrel(cqclr(&cqc), skylon_graph_rel),
+          cql("SELECT COUNT(*) FROM skylon_graph "
+            " WHERE graphname = :1 AND schemaname = :2",
+            CStringGetDatum(graph), CStringGetDatum(thisschema)))){
+      schema = thisschema;
+      break;
+    }
+  }
+  heap_close(skylon_graph_rel, RowExclusiveLock);
+  return schema;
+}
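
For illustration only (not part of this change): a minimal sketch of how the three
helpers added above are meant to be combined. resolveGraphRelation() is a
hypothetical caller; only RelidGetName(), RelidGetRangeVar() and findGraphSchema()
come from this patch.

    /* Hypothetical caller: resolve a graph name against search_path and
     * report the relation that backs it. */
    static void resolveGraphRelation(const char *graph, Oid backingRelid)
    {
        char *schema = findGraphSchema((char *) graph);   /* NULL if not visible */
        if (schema == NULL)
            elog(ERROR, "graph \"%s\" not found in search_path", graph);

        RangeVar *rv = RelidGetRangeVar(backingRelid);    /* schema-qualified name */
        elog(LOG, "graph %s.%s is backed by relation %s",
             schema, graph, RelidGetName(backingRelid));
        (void) rv;    /* in real code, rv could be handed to heap_openrv() */
    }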
 
 
 /*
diff --git a/src/backend/catalog/skylon_elabel.c b/src/backend/catalog/skylon_elabel.c
new file mode 100644
index 0000000..921576f
--- /dev/null
+++ b/src/backend/catalog/skylon_elabel.c
@@ -0,0 +1,62 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+#include "postgres.h"
+
+#include "access/fileam.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "catalog/catquery.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "catalog/skylon_elabel.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/uri.h"
+
+void InsertElabelEntry(const char* elabelname, const char* schemaname,
+                       const char* fromvlabel, const char* tovlabel) {
+  Relation skylon_elabel_rel;
+  HeapTuple skylon_elabel_tuple = NULL;
+  bool nulls[Natts_skylon_elabel];
+  Datum values[Natts_skylon_elabel];
+  cqContext cqc;
+  cqContext* pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  /*
+   * Open and lock the skylon_elabel catalog.
+   */
+  skylon_elabel_rel = heap_open(ElabelRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(caql_addrel(cqclr(&cqc), skylon_elabel_rel),
+                          cql("INSERT INTO skylon_elabel", NULL));
+  NameData name1;
+  namestrcpy(&name1, elabelname);
+  values[Anum_skylon_elabel_elabelname - 1] = NameGetDatum(&name1);
+  NameData name2;
+  namestrcpy(&name2, schemaname);
+  values[Anum_skylon_elabel_schemaname - 1] = NameGetDatum(&name2);
+  NameData name3;
+  namestrcpy(&name3, fromvlabel);
+  values[Anum_skylon_elabel_fromvlabel - 1] = NameGetDatum(&name3);
+  NameData name4;
+  namestrcpy(&name4, tovlabel);
+  values[Anum_skylon_elabel_tovlabel - 1] = NameGetDatum(&name4);
+  skylon_elabel_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_elabel_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_elabel_rel, RowExclusiveLock);
+}
diff --git a/src/backend/catalog/skylon_elabel_attribute.c b/src/backend/catalog/skylon_elabel_attribute.c
new file mode 100644
index 0000000..bf934c8
--- /dev/null
+++ b/src/backend/catalog/skylon_elabel_attribute.c
@@ -0,0 +1,66 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+#include "postgres.h"
+
+#include "access/fileam.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "catalog/catquery.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/uri.h"
+
+void InsertElabelAttrEntry(const char* schemaname, const char* elabelname, const char* attrname,
+                           Oid attrtypid, int4 primaryrank, int4 rank) {
+  Relation skylon_elabel_attribute_rel;
+  HeapTuple skylon_elabel_attribute_tuple = NULL;
+  bool nulls[Natts_skylon_elabel_attribute];
+  Datum values[Natts_skylon_elabel_attribute];
+  cqContext cqc;
+  cqContext* pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  /*
+   * Open and lock the skylon_elabel_attribute catalog.
+   */
+  skylon_elabel_attribute_rel =
+      heap_open(ElabelAttrRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(caql_addrel(cqclr(&cqc), skylon_elabel_attribute_rel),
+                          cql("INSERT INTO skylon_elabel_attribute", NULL));
+  NameData name0;
+  namestrcpy(&name0, schemaname);
+  values[Anum_skylon_elabel_attribute_schemaname - 1] = NameGetDatum(&name0);
+  NameData name1;
+  namestrcpy(&name1, elabelname);
+  values[Anum_skylon_elabel_attribute_elabelname - 1] = NameGetDatum(&name1);
+  NameData name2;
+  namestrcpy(&name2, attrname);
+  values[Anum_skylon_elabel_attribute_attrname - 1] = NameGetDatum(&name2);
+  values[Anum_skylon_elabel_attribute_attrtypid - 1] =
+      ObjectIdGetDatum(attrtypid);
+  values[Anum_skylon_elabel_attribute_primaryrank - 1] = Int32GetDatum(primaryrank);
+  values[Anum_skylon_elabel_attribute_rank - 1] = Int32GetDatum(rank);
+  skylon_elabel_attribute_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_elabel_attribute_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_elabel_attribute_rel, RowExclusiveLock);
+}
+
+
diff --git a/src/backend/catalog/skylon_graph.c b/src/backend/catalog/skylon_graph.c
new file mode 100644
index 0000000..275123b
--- /dev/null
+++ b/src/backend/catalog/skylon_graph.c
@@ -0,0 +1,60 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+#include "postgres.h"
+
+#include "access/fileam.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "catalog/catquery.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "catalog/skylon_graph.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/uri.h"
+
+void
+InsertGraphEntry(const char* graphname,
+                  const char* schemaname){
+  Relation  skylon_graph_rel;
+  HeapTuple skylon_graph_tuple = NULL;
+  bool    nulls[Natts_skylon_graph];
+  Datum   values[Natts_skylon_graph];
+  cqContext cqc;
+  cqContext  *pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  /*
+   * Open and lock the skylon_graph catalog.
+   */
+  skylon_graph_rel = heap_open(GraphRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(
+      caql_addrel(cqclr(&cqc), skylon_graph_rel),
+      cql("INSERT INTO skylon_graph",
+        NULL));
+  NameData  name1;
+  namestrcpy(&name1, graphname);
+  values[Anum_skylon_graph_graphname - 1] = NameGetDatum(&name1);
+  NameData  name2;
+  namestrcpy(&name2, schemaname);
+  values[Anum_skylon_graph_schemaname - 1] = NameGetDatum(&name2);
+  skylon_graph_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_graph_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_graph_rel, RowExclusiveLock);
+}
+
diff --git a/src/backend/catalog/skylon_graph_elabel.c b/src/backend/catalog/skylon_graph_elabel.c
new file mode 100644
index 0000000..f16ae75
--- /dev/null
+++ b/src/backend/catalog/skylon_graph_elabel.c
@@ -0,0 +1,59 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+#include "postgres.h"
+
+#include "access/fileam.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "catalog/catquery.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/uri.h"
+
+void InsertGraphElabelEntry(const char* schemaname, const char* graphname, const char* elabelname, Oid reloid) {
+  Relation skylon_graph_elabel_rel;
+  HeapTuple skylon_graph_elabel_tuple = NULL;
+  bool nulls[Natts_skylon_graph_elabel];
+  Datum values[Natts_skylon_graph_elabel];
+  cqContext cqc;
+  cqContext* pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  /*
+   * Open and lock the skylon_graph_elabel catalog.
+   */
+  skylon_graph_elabel_rel = heap_open(GraphElabelRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(caql_addrel(cqclr(&cqc), skylon_graph_elabel_rel),
+                          cql("INSERT INTO skylon_graph_elabel", NULL));
+  NameData name0;
+  namestrcpy(&name0, schemaname);
+  values[Anum_skylon_graph_elabel_schemaname - 1] = NameGetDatum(&name0);
+  NameData name1;
+  namestrcpy(&name1, graphname);
+  values[Anum_skylon_graph_elabel_graphname - 1] = NameGetDatum(&name1);
+  NameData name2;
+  namestrcpy(&name2, elabelname);
+  values[Anum_skylon_graph_elabel_elabelname - 1] = NameGetDatum(&name2);
+  values[Anum_skylon_graph_elabel_reloid - 1] = ObjectIdGetDatum(reloid);
+  skylon_graph_elabel_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_graph_elabel_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_graph_elabel_rel, RowExclusiveLock);
+}
diff --git a/src/backend/catalog/skylon_graph_vlabel.c b/src/backend/catalog/skylon_graph_vlabel.c
new file mode 100644
index 0000000..2b7f0b8
--- /dev/null
+++ b/src/backend/catalog/skylon_graph_vlabel.c
@@ -0,0 +1,59 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+#include "postgres.h"
+
+#include "access/fileam.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "catalog/catquery.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/uri.h"
+
+void InsertGraphVlabelEntry(const char* schemaname, const char* graphname, const char* vlabelname, Oid reloid) {
+  Relation skylon_graph_vlabel_rel;
+  HeapTuple skylon_graph_vlabel_tuple = NULL;
+  bool nulls[Natts_skylon_graph_vlabel];
+  Datum values[Natts_skylon_graph_vlabel];
+  cqContext cqc;
+  cqContext* pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  /*
+   * Open and lock the skylon_graph_vlabel catalog.
+   */
+  skylon_graph_vlabel_rel = heap_open(GraphVlabelRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(caql_addrel(cqclr(&cqc), skylon_graph_vlabel_rel),
+                          cql("INSERT INTO skylon_graph_vlabel", NULL));
+  NameData name0;
+  namestrcpy(&name0, schemaname);
+  values[Anum_skylon_graph_vlabel_schemaname - 1] = NameGetDatum(&name0);
+  NameData name1;
+  namestrcpy(&name1, graphname);
+  values[Anum_skylon_graph_vlabel_graphname - 1] = NameGetDatum(&name1);
+  NameData name2;
+  namestrcpy(&name2, vlabelname);
+  values[Anum_skylon_graph_vlabel_vlabelname - 1] = NameGetDatum(&name2);
+  values[Anum_skylon_graph_vlabel_reloid - 1] = ObjectIdGetDatum(reloid);
+  skylon_graph_vlabel_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_graph_vlabel_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_graph_vlabel_rel, RowExclusiveLock);
+}
diff --git a/src/backend/catalog/skylon_index.c b/src/backend/catalog/skylon_index.c
new file mode 100644
index 0000000..7a0c98b
--- /dev/null
+++ b/src/backend/catalog/skylon_index.c
@@ -0,0 +1,73 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+
+#include "postgres.h"
+
+#include "catalog/skylon_index.h"
+#include "catalog/pg_type.h"
+#include "catalog/pg_proc.h"
+#include "access/genam.h"
+#include "catalog/catquery.h"
+#include "access/fileam.h"
+#include "access/heapam.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/fmgroids.h"
+#include "utils/uri.h"
+
+void InsertSkylonIndexEntry(const char* schemaname , const char* graphname,
+                       const char* elename, char indextype, const char* indexname,
+                       const int2* indexkeys, int indexkeysnum, const int2* includekeys, int includekeysnum) {
+  Relation  skylon_index_rel;
+  HeapTuple skylon_index_tuple = NULL;
+  bool    nulls[Natts_skylon_index];
+  Datum   values[Natts_skylon_index];
+  cqContext cqc;
+  cqContext  *pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  skylon_index_rel = heap_open(SkylonIndexRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(
+      caql_addrel(cqclr(&cqc), skylon_index_rel),
+      cql("INSERT INTO skylon_index",
+        NULL));
+  NameData  name1;
+  namestrcpy(&name1, schemaname);
+  values[Anum_skylon_index_schemaname - 1] = NameGetDatum(&name1);
+  NameData  name2;
+  namestrcpy(&name2, graphname);
+  values[Anum_skylon_index_graphname - 1] = NameGetDatum(&name2);
+  NameData  name3;
+  namestrcpy(&name3, elename);
+  values[Anum_skylon_index_elename - 1] = NameGetDatum(&name3);
+  NameData  name4;
+  namestrcpy(&name4, indexname);
+  values[Anum_skylon_index_indexname - 1] = NameGetDatum(&name4);
+  values[Anum_skylon_index_indextype - 1] = CharGetDatum(indextype);
+  int2vector *indkeys = buildint2vector(NULL, indexkeysnum);
+  for (int i = 0; i < indexkeysnum; i++)
+    indkeys->values[i] = indexkeys[i];
+  values[Anum_skylon_index_indexkeys - 1] = PointerGetDatum(indkeys);
+  int2vector *incldkeys = buildint2vector(NULL, includekeysnum);
+  for (int i = 0; i < includekeysnum; i++)
+    incldkeys->values[i] = includekeys[i];
+  values[Anum_skylon_index_includekeys - 1] = PointerGetDatum(incldkeys);
+
+  skylon_index_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_index_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_index_rel, RowExclusiveLock);
+}
diff --git a/src/backend/catalog/skylon_vlabel.c b/src/backend/catalog/skylon_vlabel.c
new file mode 100644
index 0000000..89770f7
--- /dev/null
+++ b/src/backend/catalog/skylon_vlabel.c
@@ -0,0 +1,60 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+
+#include "postgres.h"
+
+#include "catalog/skylon_vlabel.h"
+#include "catalog/pg_type.h"
+#include "catalog/pg_proc.h"
+#include "access/genam.h"
+#include "catalog/catquery.h"
+#include "access/fileam.h"
+#include "access/heapam.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/fmgroids.h"
+#include "utils/uri.h"
+
+void
+InsertVlabelEntry(const char* vlabelname,
+                  const char* schemaname){
+  Relation  skylon_vlabel_rel;
+  HeapTuple skylon_vlabel_tuple = NULL;
+  bool    nulls[Natts_skylon_vlabel];
+  Datum   values[Natts_skylon_vlabel];
+  cqContext cqc;
+  cqContext  *pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  /*
+   * Open and lock the skylon_vlabel catalog.
+   */
+  skylon_vlabel_rel = heap_open(VlabelRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(
+      caql_addrel(cqclr(&cqc), skylon_vlabel_rel),
+      cql("INSERT INTO skylon_vlabel",
+        NULL));
+  NameData  name1;
+  namestrcpy(&name1, vlabelname);
+  values[Anum_skylon_vlabel_vlabelname - 1] = NameGetDatum(&name1);
+  NameData  name2;
+  namestrcpy(&name2, schemaname);
+  values[Anum_skylon_vlabel_schemaname - 1] = NameGetDatum(&name2);
+  skylon_vlabel_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_vlabel_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_vlabel_rel, RowExclusiveLock);
+}
diff --git a/src/backend/catalog/skylon_vlabel_attribute.c b/src/backend/catalog/skylon_vlabel_attribute.c
new file mode 100644
index 0000000..da5fee9
--- /dev/null
+++ b/src/backend/catalog/skylon_vlabel_attribute.c
@@ -0,0 +1,64 @@
+////////////////////////////////////////////////////////////////////////////
+// Copyright 2016, Oushu Inc.
+// All rights reserved.
+//
+// Author:
+////////////////////////////////////////////////////////////////////////////
+
+#include "postgres.h"
+
+#include "access/fileam.h"
+#include "access/genam.h"
+#include "access/heapam.h"
+#include "catalog/catquery.h"
+#include "catalog/dependency.h"
+#include "catalog/indexing.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "mb/pg_wchar.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+#include "utils/uri.h"
+
+void InsertVlabelAttrEntry(const char* schemaname, const char* vlabelname, const char* attrname,
+                           Oid attrtypid, int4 primaryrank, int4 rank) {
+  Relation skylon_vlabel_attribute_rel;
+  HeapTuple skylon_vlabel_attribute_tuple = NULL;
+  bool nulls[Natts_skylon_vlabel_attribute];
+  Datum values[Natts_skylon_vlabel_attribute];
+  cqContext cqc;
+  cqContext* pcqCtx;
+
+  MemSet(values, 0, sizeof(values));
+  MemSet(nulls, false, sizeof(nulls));
+
+  /*
+   * Open and lock the skylon_vlabel_attribute catalog.
+   */
+  skylon_vlabel_attribute_rel =
+      heap_open(VlabelAttrRelationId, RowExclusiveLock);
+
+  pcqCtx = caql_beginscan(caql_addrel(cqclr(&cqc), skylon_vlabel_attribute_rel),
+                          cql("INSERT INTO skylon_vlabel_attribute", NULL));
+  NameData name0;
+  namestrcpy(&name0, schemaname);
+  values[Anum_skylon_vlabel_attribute_schemaname - 1] = NameGetDatum(&name0);
+  NameData name1;
+  namestrcpy(&name1, vlabelname);
+  values[Anum_skylon_vlabel_attribute_vlabelname - 1] = NameGetDatum(&name1);
+  NameData name2;
+  namestrcpy(&name2, attrname);
+  values[Anum_skylon_vlabel_attribute_attrname - 1] = NameGetDatum(&name2);
+  values[Anum_skylon_vlabel_attribute_attrtypid - 1] =
+      ObjectIdGetDatum(attrtypid);
+  values[Anum_skylon_vlabel_attribute_primaryrank - 1] = Int32GetDatum(primaryrank);
+  values[Anum_skylon_vlabel_attribute_rank - 1] = Int32GetDatum(rank);
+  skylon_vlabel_attribute_tuple = caql_form_tuple(pcqCtx, values, nulls);
+  caql_insert(pcqCtx, skylon_vlabel_attribute_tuple);
+  caql_endscan(pcqCtx);
+  heap_close(skylon_vlabel_attribute_rel, RowExclusiveLock);
+}
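
For illustration only (not part of this change): the seven skylon_*.c files above all
follow the same caql INSERT pattern. A hedged sketch of how a graph-creation utility
might chain the new helpers; registerSimpleGraph(), the label names, the type oids
and the rank values are illustrative assumptions, only the Insert*Entry() functions
come from this patch.

    static void registerSimpleGraph(const char *schema,
                                    Oid vertexRelOid, Oid edgeRelOid)
    {
        InsertGraphEntry("g", schema);

        /* one vertex label with a single key attribute */
        InsertVlabelEntry("person", schema);
        InsertVlabelAttrEntry(schema, "person", "id", INT8OID,
                              1 /* primaryrank */, 1 /* rank */);
        InsertGraphVlabelEntry(schema, "g", "person", vertexRelOid);

        /* one edge label connecting the vertex label to itself */
        InsertElabelEntry("knows", schema, "person", "person");
        InsertElabelAttrEntry(schema, "knows", "since", DATEOID, 0, 1);
        InsertGraphElabelEntry(schema, "g", "knows", edgeRelOid);
    }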
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 091f1a6..8ec420c 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -855,3 +855,32 @@ CREATE VIEW pg_remote_logins AS
 		 LEFT JOIN pg_authid A ON (A.oid = C.rcowner);
 
 REVOKE ALL ON pg_remote_credentials FROM public;
+
+CREATE VIEW skylon_vertex AS
+	SELECT n.vlabelname AS vertexname, n.schemaname AS schemaname FROM skylon_vlabel AS n;
+	
+CREATE VIEW skylon_edge AS
+	SELECT n.elabelname AS edgename, n.schemaname AS schemaname, n.fromvlabel AS fromvertex, n.tovlabel AS tovertex
+	FROM skylon_elabel AS n;
+	
+CREATE VIEW skylon_vertex_attribute AS
+	SELECT n.schemaname AS schemaname, n.vlabelname AS vertexname, 
+	n.attrname AS attrname, n.attrtypid AS attrtypid, n.primaryrank AS primaryrank, n.rank AS rank
+	FROM skylon_vlabel_attribute AS n;
+	
+CREATE VIEW skylon_edge_attribute AS
+	SELECT n.schemaname AS schemaname, n.elabelname AS edgename, 
+	n.attrname AS attrname, n.attrtypid AS attrtypid, n.primaryrank AS primaryrank, n.rank AS rank
+	FROM skylon_elabel_attribute AS n;
+	
+CREATE VIEW skylon_graph_vertex AS
+	SELECT n.schemaname AS schemaname, n.graphname AS graphname, n.vlabelname AS vertexname,
+	m.location AS location, l.relname
+	FROM skylon_graph_vlabel AS n, pg_exttable AS m, pg_class AS l
+	WHERE n.reloid = m.reloid AND n.reloid = l.oid;
+	
+CREATE VIEW skylon_graph_edge AS
+	SELECT n.schemaname AS schemaname, n.graphname AS graphname, n.elabelname AS edgename,
+	m.location AS location, l.relname
+	FROM skylon_graph_elabel AS n, pg_exttable AS m, pg_class AS l
+	WHERE n.reloid = m.reloid AND n.reloid = l.oid;
diff --git a/src/backend/cdb/Makefile b/src/backend/cdb/Makefile
index 24808ed..1803d70 100644
--- a/src/backend/cdb/Makefile
+++ b/src/backend/cdb/Makefile
@@ -85,7 +85,6 @@ OBJS = cdbanalyze.o \
 	   cdbdatalocality.o \
 	   dispatcher.o \
 	   dispatcher_mgt.o \
-	   scheduler.o \
 	   workermgr.o \
 	   executormgr.o \
 	   poolmgr.o \
diff --git a/src/backend/cdb/cdbdatalocality.c b/src/backend/cdb/cdbdatalocality.c
index 5a6615d..67704b8 100644
--- a/src/backend/cdb/cdbdatalocality.c
+++ b/src/backend/cdb/cdbdatalocality.c
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -102,6 +102,9 @@ typedef struct HostnameIndexEntry {
  */
 typedef struct collect_scan_rangetable_context {
 	plan_tree_base_prefix base;
+	List *indexscan_range_tables;  // relation oids scanned by orc index (only) scans
+	List *indexscan_indexs;  // index oids, parallel to indexscan_range_tables
+	List *parquetscan_range_tables; // relation oids scanned by parquet-style scans
 	List *range_tables; // range table for scan only
 	List *full_range_tables;  // full range table
 } collect_scan_rangetable_context;
@@ -428,11 +431,17 @@ static void AOGetSegFileDataLocation(Relation relation,
 		int* allblocks, GpPolicy *targetPolicy);
 
 static void ParquetGetSegFileDataLocation(Relation relation,
-		AppendOnlyEntry *aoEntry, Snapshot metadataSnapshot,
+		Oid segrelid, Snapshot metadataSnapshot, List *idx_scan_ids,
 		split_to_segment_mapping_context *context, int64 splitsize,
 		Relation_Data *rel_data, int* hitblocks,
 		int* allblocks, GpPolicy *targetPolicy);
 
+static void ParquetGetSegFileDataLocationWrapper(
+    Relation relation, AppendOnlyEntry *aoEntry, Snapshot metadataSnapshot,
+    split_to_segment_mapping_context *context, int64 splitsize,
+    Relation_Data *rel_data, int *hitblocks, int *allblocks,
+    GpPolicy *targetPolicy);
+
 static void ExternalGetHdfsFileDataLocation(
     Relation relation, split_to_segment_mapping_context *context,
     int64 splitsize, Relation_Data *rel_data, int *allblocks,
@@ -451,7 +460,7 @@ static void ExternalGetMagmaRangeDataLocation(
 Oid LookupCustomProtocolBlockLocationFunc(char *protoname);
 
 static BlockLocation *fetch_hdfs_data_block_location(char *filepath, int64 len,
-		int *block_num, RelFileNode rnode, uint32_t segno, double* hit_ratio);
+		int *block_num, RelFileNode rnode, uint32_t segno, double* hit_ratio, bool index_scan);
 
 static void free_hdfs_data_block_location(BlockLocation *locations,
 		int block_num);
@@ -614,6 +623,9 @@ static void init_datalocality_context(PlannedStmt *plannedstmt,
 
 	context->chsl_context.relations = NIL;
 	context->srtc_context.range_tables = NIL;
+	context->srtc_context.indexscan_indexs = NIL;
+	context->srtc_context.indexscan_range_tables = NIL;
+	context->srtc_context.parquetscan_range_tables = NIL;
 	context->srtc_context.full_range_tables = plannedstmt->rtable;
 	context->srtc_context.base.node = (Node *)plannedstmt;
 
@@ -671,27 +683,40 @@ static void init_datalocality_context(PlannedStmt *plannedstmt,
 	return;
 }
 
-bool collect_scan_rangetable(Node *node,
-		collect_scan_rangetable_context *cxt) {
-	if (NULL == node) return false;
-
-	switch (nodeTag(node)) {
-	case T_ExternalScan:
-	case T_MagmaIndexScan:
-	case T_MagmaIndexOnlyScan:
-	case T_OrcIndexScan:
-	case T_OrcIndexOnlyScan:
-	case T_AppendOnlyScan:
-	case T_ParquetScan: {
-		RangeTblEntry  *rte = rt_fetch(((Scan *)node)->scanrelid,
-											   cxt->full_range_tables);
-		cxt->range_tables = lappend(cxt->range_tables, rte);
-	}
-	default:
-		break;
-	}
+bool collect_scan_rangetable(Node *node, collect_scan_rangetable_context *cxt) {
+  if (NULL == node) return false;
+
+  switch (nodeTag(node)) {
+    case T_OrcIndexScan:  // Same as T_OrcIndexOnlyScan
+    case T_OrcIndexOnlyScan: {
+      RangeTblEntry *rte =
+          rt_fetch(((Scan *)node)->scanrelid, cxt->full_range_tables);
+      cxt->indexscan_range_tables =
+          lappend_oid(cxt->indexscan_range_tables, rte->relid);
+      cxt->indexscan_indexs =
+          lappend_oid(cxt->indexscan_indexs, ((IndexScan *)node)->indexid);
+      cxt->range_tables = lappend(cxt->range_tables, rte);
+      break;
+    }
+    // FIXME(sxwang): Should we append relid to parquetscan_range_tables for all
+    // these kinds of scans?
+    case T_ExternalScan:        // Fall through to T_ParquetScan
+    case T_MagmaIndexScan:      // Fall through to T_ParquetScan
+    case T_MagmaIndexOnlyScan:  // Fall through to T_ParquetScan
+    case T_AppendOnlyScan:      // Fall through to T_ParquetScan
+    case T_ParquetScan: {
+      RangeTblEntry *rte =
+          rt_fetch(((Scan *)node)->scanrelid, cxt->full_range_tables);
+      cxt->parquetscan_range_tables =
+          lappend_oid(cxt->parquetscan_range_tables, rte->relid);
+      cxt->range_tables = lappend(cxt->range_tables, rte);
+      break;
+    }
+    default:
+      break;
+  }
 
-	return plan_tree_walker(node, collect_scan_rangetable, cxt);
+  return plan_tree_walker(node, collect_scan_rangetable, cxt);
 }
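
For illustration only (not part of this change): the walker above is driven through
plan_tree_walker(). A sketch of the kick-off call, assuming the context fields
initialized in init_datalocality_context(); the exact call site is not shown in this
hunk.

    collect_scan_rangetable((Node *) plannedstmt->planTree,
                            &context->srtc_context);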
 
 /*
@@ -1019,7 +1044,7 @@ int64 get_block_locations_and_calculate_table_size(split_to_segment_mapping_cont
 				} else {
 				  // for orc table, we just reuse the parquet logic here
 					rel_data->type = DATALOCALITY_PARQUET;
-					ParquetGetSegFileDataLocation(rel, aoEntry, ActiveSnapshot, context,
+					ParquetGetSegFileDataLocationWrapper(rel, aoEntry, ActiveSnapshot, context,
 							context->split_size, rel_data, &hitblocks,
 							&allblocks, targetPolicy);
 				}
@@ -1436,7 +1461,7 @@ search_host_in_stat_context(split_to_segment_mapping_context *context,
  */
 static BlockLocation *
 fetch_hdfs_data_block_location(char *filepath, int64 len, int *block_num,
-		RelFileNode rnode, uint32_t segno, double* hit_ratio) {
+		RelFileNode rnode, uint32_t segno, double* hit_ratio, bool index_scan) {
 	// for fakse test, the len of file always be zero
 	if(len == 0  && !debug_fake_datalocality){
 		*hit_ratio = 0.0;
@@ -1448,7 +1473,7 @@ fetch_hdfs_data_block_location(char *filepath, int64 len, int *block_num,
 	uint64_t beginTime;
 	beginTime = gettime_microsec();
 
-	if (metadata_cache_enable) {
+	if (metadata_cache_enable && !index_scan) {
 		file_info = CreateHdfsFileInfo(rnode, segno);
 		if (metadata_cache_testfile && metadata_cache_testfile[0]) {
 			locations = GetHdfsFileBlockLocationsForTest(filepath, len, block_num);
@@ -1663,7 +1688,7 @@ static void AOGetSegFileDataLocation(Relation relation,
 				FormatAOSegmentFileName(basepath, segno, -1, 0, &segno, segfile_path);
 				double hit_ratio=0.0;
 				locations = fetch_hdfs_data_block_location(segfile_path, logic_len,
-						&block_num, relation->rd_node, segno, &hit_ratio);
+						&block_num, relation->rd_node, segno, &hit_ratio, false);
 				*allblocks += block_num;
 				*hitblocks += block_num * hit_ratio;
 				//fake data locality need to recalculate logic length
@@ -1737,7 +1762,7 @@ static void AOGetSegFileDataLocation(Relation relation,
 				FormatAOSegmentFileName(basepath, segno, -1, 0, &segno, segfile_path);
 				double hit_ratio = 0.0;
 				locations = fetch_hdfs_data_block_location(segfile_path, logic_len,
-						&block_num, relation->rd_node, segno, &hit_ratio);
+						&block_num, relation->rd_node, segno, &hit_ratio, false);
 				*allblocks += block_num;
 				*hitblocks += block_num * hit_ratio;
 				//fake data locality need to recalculate logic length
@@ -1822,7 +1847,7 @@ static void AOGetSegFileDataLocation(Relation relation,
 				FormatAOSegmentFileName(basepath, segno, -1, 0, &segno, segfile_path);
 				double hit_ratio = 0.0;
 				locations = fetch_hdfs_data_block_location(segfile_path, logic_len,
-						&block_num, relation->rd_node, segno, &hit_ratio);
+						&block_num, relation->rd_node, segno, &hit_ratio, false);
 				*allblocks += block_num;
 				*hitblocks += block_num * hit_ratio;
 				if ((locations != NULL) && (block_num > 0)) {
@@ -1883,7 +1908,7 @@ static void AOGetSegFileDataLocation(Relation relation,
 				FormatAOSegmentFileName(basepath, segno, -1, 0, &segno, segfile_path);
 				double hit_ratio = 0.0;
 				locations = fetch_hdfs_data_block_location(segfile_path, logic_len,
-						&block_num, relation->rd_node, segno, &hit_ratio);
+						&block_num, relation->rd_node, segno, &hit_ratio, false);
 				*allblocks += block_num;
 				*hitblocks += block_num * hit_ratio;
 				//fake data locality need to recalculate logic length
@@ -1959,15 +1984,37 @@ static void AOGetSegFileDataLocation(Relation relation,
 	return;
 }
 
+static List *GetAllIdxScanIds(collect_scan_rangetable_context *context,
+                              Oid rd_id) {
+  List *ret = NULL;
+
+  ListCell *table;
+  ListCell *id;
+  for (table = list_head(context->indexscan_range_tables),
+      id = list_head(context->indexscan_indexs);
+       table != NULL; table = lnext(table), id = lnext(id)) {
+    if (lfirst_oid(table) == rd_id) {
+      if (ret == NULL) {
+        ret = list_make1_oid(lfirst_oid(id));
+      } else {
+        ret = lappend_oid(ret, lfirst_oid(id));
+      }
+    }
+  }
+
+  return ret;
+}
+
 /*
  * ParquetGetSegFileDataLocation: fetch the data location of the
  * segment files of the Parquet relation.
  */
 static void ParquetGetSegFileDataLocation(Relation relation,
-		AppendOnlyEntry *aoEntry, Snapshot metadataSnapshot,
+		Oid segrelid, Snapshot metadataSnapshot, List* idx_scan_ids,
 		split_to_segment_mapping_context *context, int64 splitsize,
 		Relation_Data *rel_data, int* hitblocks,
 		int* allblocks, GpPolicy *targetPolicy) {
+	bool index_scan = idx_scan_ids != NULL;
 	char *basepath;
 	char *segfile_path;
 	int filepath_maxlen;
@@ -1979,10 +2026,10 @@ static void ParquetGetSegFileDataLocation(Relation relation,
 	SysScanDesc parquetscan;
 
 	basepath = relpath(relation->rd_node);
-	filepath_maxlen = strlen(basepath) + 9;
+	filepath_maxlen = strlen(basepath) + 25;
 	segfile_path = (char *) palloc0(filepath_maxlen);
 
-	pg_parquetseg_rel = heap_open(aoEntry->segrelid, AccessShareLock);
+	pg_parquetseg_rel = heap_open(segrelid, AccessShareLock);
 	pg_parquetseg_dsc = RelationGetDescr(pg_parquetseg_rel);
 	parquetscan = systable_beginscan(pg_parquetseg_rel, InvalidOid, FALSE,
 			metadataSnapshot, 0, NULL);
@@ -1991,22 +2038,38 @@ static void ParquetGetSegFileDataLocation(Relation relation,
 		BlockLocation *locations;
 		int block_num = 0;
 		Relation_File *file;
-
-		int segno = DatumGetInt32(
-				fastgetattr(tuple, Anum_pg_parquetseg_segno, pg_parquetseg_dsc, NULL));
-		int64 logic_len = (int64) DatumGetFloat8(
-				fastgetattr(tuple, Anum_pg_parquetseg_eof, pg_parquetseg_dsc, NULL));
+		int segno = 0;
+		int64 logic_len = 0;
+		Oid idx_scan_id = InvalidOid;
+		if (index_scan) {
+		  idx_scan_id = DatumGetObjectId(
+		      fastgetattr(tuple, Anum_pg_orcseg_idx_idxoid, pg_parquetseg_dsc, NULL));
+		  if (!list_member_oid(idx_scan_ids, idx_scan_id)) continue;
+		  segno = DatumGetInt32(
+		      fastgetattr(tuple, Anum_pg_orcseg_idx_segno, pg_parquetseg_dsc, NULL));
+		  logic_len = (int64) DatumGetFloat8(
+		      fastgetattr(tuple, Anum_pg_orcseg_idx_eof, pg_parquetseg_dsc, NULL));
+		} else {
+		  segno = DatumGetInt32(
+		      fastgetattr(tuple, Anum_pg_parquetseg_segno, pg_parquetseg_dsc, NULL));
+		  logic_len = (int64) DatumGetFloat8(
+		      fastgetattr(tuple, Anum_pg_parquetseg_eof, pg_parquetseg_dsc, NULL));
+		}
 		context->total_metadata_logic_len += logic_len;
 		bool isRelationHash = true;
 		if (targetPolicy->nattrs == 0) {
 			isRelationHash = false;
 		}
 
+		if (index_scan) {
+		  FormatAOSegmentIndexFileName(basepath, segno, idx_scan_id, -1, 0, &segno, segfile_path);
+		} else {
+		  FormatAOSegmentFileName(basepath, segno, -1, 0, &segno, segfile_path);
+		}
 		if (!context->keep_hash || !isRelationHash) {
-			FormatAOSegmentFileName(basepath, segno, -1, 0, &segno, segfile_path);
 			double hit_ratio = 0.0;
 			locations = fetch_hdfs_data_block_location(segfile_path, logic_len,
-					&block_num, relation->rd_node, segno, &hit_ratio);
+					&block_num, relation->rd_node, segno, &hit_ratio, index_scan);
 			*allblocks += block_num;
 			*hitblocks += block_num * hit_ratio;
 			//fake data locality need to recalculate logic length
@@ -2044,7 +2107,8 @@ static void ParquetGetSegFileDataLocation(Relation relation,
 					splits[realSplitNum].host = -1;
 					splits[realSplitNum].is_local_read = true;
 					splits[realSplitNum].range_id = -1;
-					splits[realSplitNum].replicaGroup_id = -1;
+					// XXX(sxwang): hacky way to pass idx_scan_id through replicaGroup_id.
+					splits[realSplitNum].replicaGroup_id = idx_scan_id;
 					splits[realSplitNum].offset = offset;
 					splits[realSplitNum].length = file->locations[realSplitNum].length;
 					splits[realSplitNum].logiceof = logic_len;
@@ -2064,10 +2128,9 @@ static void ParquetGetSegFileDataLocation(Relation relation,
 				rel_data->files = lappend(rel_data->files, file);
 			}
 		} else {
-			FormatAOSegmentFileName(basepath, segno, -1, 0, &segno, segfile_path);
 			double hit_ratio = 0.0;
 			locations = fetch_hdfs_data_block_location(segfile_path, logic_len,
-					&block_num, relation->rd_node, segno, &hit_ratio);
+					&block_num, relation->rd_node, segno, &hit_ratio, index_scan);
 			*allblocks += block_num;
 			*hitblocks += block_num * hit_ratio;
 			File_Split *split = (File_Split *) palloc(sizeof(File_Split));
@@ -2075,7 +2138,8 @@ static void ParquetGetSegFileDataLocation(Relation relation,
 			file->segno = segno;
 			split->offset = 0;
 			split->range_id = -1;
-			split->replicaGroup_id = -1;
+			// XXX(sxwang): hacky way to pass idx_scan_id through replicaGroup_id.
+			split->replicaGroup_id = idx_scan_id;
 			split->length = logic_len;
 			split->logiceof = logic_len;
 			split->host = -1;
@@ -2119,6 +2183,30 @@ static void ParquetGetSegFileDataLocation(Relation relation,
 	return;
 }
 
+static void ParquetGetSegFileDataLocationWrapper(
+    Relation relation, AppendOnlyEntry *aoEntry, Snapshot metadataSnapshot,
+    split_to_segment_mapping_context *context, int64 splitsize,
+    Relation_Data *rel_data, int *hitblocks, int *allblocks,
+    GpPolicy *targetPolicy) {
+  // ParquetScan
+  if (list_member_oid(context->srtc_context.parquetscan_range_tables,
+                      relation->rd_id)) {
+    ParquetGetSegFileDataLocation(relation, aoEntry->segrelid, metadataSnapshot,
+                                  NULL, context, splitsize, rel_data, hitblocks,
+                                  allblocks, targetPolicy);
+  }
+  // IndexScan
+  if (list_member_oid(context->srtc_context.indexscan_range_tables,
+                      relation->rd_id)) {
+    List *idx_scan_ids =
+        GetAllIdxScanIds(&(context->srtc_context), relation->rd_id);
+    ParquetGetSegFileDataLocation(
+        relation, aoEntry->blkdirrelid, metadataSnapshot, idx_scan_ids, context,
+        splitsize, rel_data, hitblocks, allblocks, targetPolicy);
+    list_free(idx_scan_ids);
+  }
+}
+
 static void InvokeHDFSProtocolBlockLocation(Oid    procOid,
                                             List  *locs,
                                             List **blockLocations)
@@ -5976,8 +6064,8 @@ run_allocation_algorithm(SplitAllocResult *result, List *virtual_segments, Query
 	uint64_t run_datalocality = 0;
 	run_datalocality = gettime_microsec();
 	int dl_overall_time = run_datalocality - before_run_allocation;
-    
-    context->cal_datalocality_time_us = dl_overall_time; 
+
+    context->cal_datalocality_time_us = dl_overall_time;
 
 	if(debug_datalocality_time){
 		elog(LOG, "datalocality overall execution time: %d us. \n", dl_overall_time);
@@ -5985,7 +6073,7 @@ run_allocation_algorithm(SplitAllocResult *result, List *virtual_segments, Query
 
     result->datalocalityTime = (double)(context->metadata_cache_time_us + context->alloc_resource_time_us + context->cal_datalocality_time_us)/ 1000;
     appendStringInfo(result->datalocalityInfo, "DFS metadatacache: %.3f ms; resource allocation: %.3f ms; datalocality calculation: %.3f ms.",
-            (double)context->metadata_cache_time_us/1000, (double)context->alloc_resource_time_us/1000, (double)context->cal_datalocality_time_us/1000);  
+            (double)context->metadata_cache_time_us/1000, (double)context->alloc_resource_time_us/1000, (double)context->cal_datalocality_time_us/1000);
 
 	return alloc_result;
 }
@@ -6396,7 +6484,7 @@ calculate_planner_segment_num(PlannedStmt *plannedstmt, Query *query,
 			}
 			uint64_t after_rm_allocate_resource = gettime_microsec();
 			int eclaspeTime = after_rm_allocate_resource - before_rm_allocate_resource;
-        
+
             context.alloc_resource_time_us = eclaspeTime;
 
 			if(debug_datalocality_time){
@@ -6824,4 +6912,3 @@ char *search_hostname_by_ipaddr(const char *ipaddr) {
   }
   return entry->hostname;
 }
-
diff --git a/src/backend/cdb/cdbexplain.c b/src/backend/cdb/cdbexplain.c
index 1ed3395..f3bc3a6 100644
--- a/src/backend/cdb/cdbexplain.c
+++ b/src/backend/cdb/cdbexplain.c
@@ -35,7 +35,6 @@
 #include "cdb/cdbexplain.h"             /* me */
 #include "cdb/cdbpartition.h"
 #include "cdb/cdbvars.h"                /* Gp_segment */
-#include "cdb/scheduler.h"
 #include "executor/executor.h"          /* ExecStateTreeWalker */
 #include "executor/instrument.h"        /* Instrumentation */
 #include "lib/stringinfo.h"             /* StringInfo */
@@ -275,11 +274,10 @@ static int
 cdbexplain_countLeafPartTables(PlanState *planstate);
 static CdbVisitOpt newplan_collectExecStats(MyInstrumentation *instr,
                                      CdbExplain_SendStatCtx *ctx);
-
-static void cdbexplain_depositSchedulerStatsToNode(
-    PlanState *planstate, struct SchedulerData *scheduler_data,
-    MyInstrumentation **myInstr, int iInst, int stageNo,
-    struct CdbExplain_ShowStatCtx *showstatctx);
+static CdbVisitOpt myinstrument_walk_node(
+    MyInstrumentation *instr,
+    CdbVisitOpt (*walker)(MyInstrumentation *instr, void *context),
+    void *context);
 
 /*
  * cdbexplain_localExecStats
@@ -519,6 +517,36 @@ static CdbVisitOpt newplan_collectExecStats(MyInstrumentation *myinstr,
   return CdbVisit_Walk;
 }
 
+CdbVisitOpt myinstrument_walk_node(
+    MyInstrumentation *instr,
+    CdbVisitOpt (*walker)(MyInstrumentation *instr, void *context),
+    void *context) {
+  CdbVisitOpt whatnext;
+
+  if (instr == NULL) return CdbVisit_Walk;
+
+  whatnext = walker(instr, context);
+  if (whatnext == CdbVisit_Walk) {
+    if (instr->leftTree && whatnext == CdbVisit_Walk)
+      whatnext = myinstrument_walk_node(instr->leftTree, walker, context);
+    if (instr->rightTree && whatnext == CdbVisit_Walk)
+      whatnext = myinstrument_walk_node(instr->rightTree, walker, context);
+    if (instr->subTree && whatnext == CdbVisit_Walk)
+      whatnext = myinstrument_walk_node(instr->subTree, walker, context);
+    if (instr->subplan && whatnext == CdbVisit_Walk) {
+      whatnext = myinstrument_walk_node(instr->subplan, walker, context);
+    }
+    if (instr->subplanSibling && whatnext == CdbVisit_Walk) {
+      whatnext = myinstrument_walk_node(instr->subplanSibling, walker, context);
+    }
+  } else if (whatnext == CdbVisit_Skip) {
+    whatnext = CdbVisit_Walk;
+  }
+
+  Assert(whatnext != CdbVisit_Skip);
+  return whatnext;
+}
+
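
For illustration only (not part of this change): a minimal walker callback showing
how myinstrument_walk_node() is meant to be used. countMyInstrNodes() and
MyInstrCountCtx are hypothetical names introduced here for the sketch.

    typedef struct MyInstrCountCtx { int nnodes; } MyInstrCountCtx;

    static CdbVisitOpt countMyInstrNodes(MyInstrumentation *instr, void *context)
    {
        ((MyInstrCountCtx *) context)->nnodes++;
        return CdbVisit_Walk;           /* keep descending into the tree */
    }

    /* usage:
     *   MyInstrCountCtx ctx = {0};
     *   myinstrument_walk_node(rootInstr, countMyInstrNodes, &ctx);
     */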
 /*
  * cdbexplain_sendExecStats
  *    Called by qExec process to send EXPLAIN ANALYZE statistics to qDisp.
@@ -2399,128 +2427,3 @@ cdbexplain_countLeafPartTables(PlanState *planstate)
 	Oid root_oid = getrelid(scan->scanrelid, planstate->state->es_range_table);
 	return countLeafPartTables(root_oid);
 }
-
-typedef struct CdbExplain_RecvSchedulerStatCtx {
-  CdbExplain_ShowStatCtx *showstatctx;
-  struct SchedulerData *scheduler_data;
-  int stageNo;
-} CdbExplain_RecvSchedulerStatCtx;
-
-CdbVisitOpt cdbexplain_recvSchedulerStatWalker(PlanState *planstate,
-                                               void *context) {
-  CdbExplain_RecvSchedulerStatCtx *ctx =
-      (CdbExplain_RecvSchedulerStatCtx *)context;
-
-  SchedulerSliceStats *sliceStat = &ctx->scheduler_data->slices[ctx->stageNo];
-  if (ctx->stageNo > 0)
-    cdbexplain_depositSchedulerStatsToNode(
-        planstate, ctx->scheduler_data, sliceStat->instr, sliceStat->iStatInst,
-        ctx->stageNo, ctx->showstatctx);
-  sliceStat->iStatInst++;
-
-  if (IsA(planstate, MotionState)) {
-    cdbexplain_recvSchedulerExecStats(planstate->lefttree, ctx->scheduler_data,
-                                      ((Motion *)planstate->plan)->motionID,
-                                      ctx->showstatctx);
-    return CdbVisit_Skip;
-  }
-
-  return CdbVisit_Walk;
-}
-
-void cdbexplain_recvSchedulerExecStats(
-    struct PlanState *planstate, struct SchedulerData *scheduler_data,
-    int stageNo, struct CdbExplain_ShowStatCtx *showstatctx) {
-  if (!planstate || !planstate->instrument || !showstatctx) return;
-
-  CdbExplain_RecvSchedulerStatCtx ctx;
-  ctx.scheduler_data = scheduler_data;
-  ctx.showstatctx = showstatctx;
-  ctx.stageNo = stageNo;
-  SchedulerSliceStats *sliceStat = &ctx.scheduler_data->slices[ctx.stageNo];
-  sliceStat->iStatInst = 1;
-
-  MemoryContext oldcxt;
-  oldcxt = MemoryContextSwitchTo(showstatctx->explaincxt);
-
-  planstate_walk_node(planstate, cdbexplain_recvSchedulerStatWalker, &ctx);
-  if (ctx.stageNo > 0)
-    Assert(sliceStat->iStatInst * ctx.scheduler_data->segmentNum ==
-           sliceStat->nStatInst);
-
-  MemoryContextSwitchTo(oldcxt);
-}
-
-/*
- * cdbexplain_depositSchedulerStatsToNode
- *
- * Called by cdbexplain_recvSchedulerStatWalker to update the given
- * PlanState node's Instrument node with statistics received from
- * workers or collected locally.  Attaches a CdbExplain_NodeSummary
- * block to the Instrument node.  If top node of slice, per-slice
- * statistics are transferred from the StatHdr to the SliceSummary.
- */
-static void cdbexplain_depositSchedulerStatsToNode(
-    PlanState *planstate, struct SchedulerData *scheduler_data,
-    MyInstrumentation **myInstr, int iInst, int stageNo,
-    struct CdbExplain_ShowStatCtx *showstatctx) {
-  Instrumentation *instr = planstate->instrument;
-
-  /*
-   * ns is the node summary across all QEs of the segworker group. It also
-   * contains detailed "unsummarized" raw stat for a node across all QEs in
-   * current segworker group (in the insts array)
-   */
-  CdbExplain_NodeSummary *ns;
-  int nInst;
-
-  Insist(instr);
-
-  /* Caller already switched to EXPLAIN context. */
-  Assert(CurrentMemoryContext == showstatctx->explaincxt);
-
-  /* Allocate NodeSummary block. */
-  nInst = scheduler_data->segmentNum;
-  ns = (CdbExplain_NodeSummary *)palloc0(sizeof(*ns) - sizeof(ns->insts) +
-                                         nInst * sizeof(ns->insts[0]));
-  ns->segindex0 = 0;
-  ns->ninst = nInst;
-  ns->ntuples.vcnt = scheduler_data->segmentNum;
-
-  int step = scheduler_data->slices[stageNo].nStatInst / nInst;
-
-  for (int i = 0; i < scheduler_data->segmentNum; i++, iInst+=step) {
-    ns->insts[i].startup = myInstr[iInst]->firstTupleBatch / 1000000.0;
-    ns->insts[i].total = myInstr[iInst]->counter / 1000000.0;
-    ns->insts[i].firststart.tv_sec = myInstr[iInst]->firstStart / 1000000;
-    ns->insts[i].firststart.tv_usec = myInstr[iInst]->firstStart % 1000000;
-    ns->insts[i].ntuples = myInstr[iInst]->tupleCount;
-    ns->insts[i].execmemused = myInstr[iInst]->execmemused;
-    ns->insts[i].workmemused = myInstr[iInst]->workmemused;
-    ns->insts[i].workmemwanted = myInstr[iInst]->workmemwanted;
-    ns->ntuples.vsum += myInstr[iInst]->tupleCount;
-    ns->execmemused.vsum += myInstr[iInst]->execmemused;
-    ns->workmemused.vsum += myInstr[iInst]->workmemused;
-    ns->workmemwanted.vsum += myInstr[iInst]->workmemwanted;
-    if (instr->ntuples <= ns->insts[i].ntuples) {
-      ns->ntuples.imax = i;
-      strcpy(ns->ntuples.hostnamemax, scheduler_data->hosts[i]);
-      ns->ntuples.vmax = ns->insts[i].ntuples;
-      instr->ntuples = ns->insts[i].ntuples;
-      instr->startup = ns->insts[i].startup;
-      instr->total = ns->insts[i].total;
-      instr->firststart = ns->insts[i].firststart;
-    }
-    if (instr->totalLast <= ns->insts[i].total) {
-      ns->ntuples.ilast = i;
-      strcpy(ns->ntuples.hostnamelast, scheduler_data->hosts[i]);
-      ns->ntuples.vlast = ns->insts[i].ntuples;
-      instr->startupLast = ns->insts[i].startup;
-      instr->totalLast = ns->insts[i].total;
-      instr->firststartLast = ns->insts[i].firststart;
-    }
-  }
-
-  /* Attach our new NodeSummary to the Instrumentation node. */
-  instr->cdbNodeSummary = ns;
-} /* cdbexplain_depositSchedulerStatsToNode */
diff --git a/src/backend/cdb/cdbplan.c b/src/backend/cdb/cdbplan.c
index 3bc21d9..10bb0d2 100644
--- a/src/backend/cdb/cdbplan.c
+++ b/src/backend/cdb/cdbplan.c
@@ -393,9 +393,13 @@ plan_tree_mutator(Node *node,
 
 			FLATCOPY(newidxscan, idxscan, MagmaIndexScan);
 			SCANMUTATE(newidxscan, idxscan);
+
+			MUTATE(newidxscan->indexqual, idxscan->indexqual, List *);
 			MUTATE(newidxscan->indexqualorig, idxscan->indexqualorig, List *);
 			MUTATE(newidxscan->uriList, idxscan->uriList, List *);
 			MUTATE(newidxscan->fmtOpts, idxscan->fmtOpts, List *);
+			MUTATE(newidxscan->indexstrategy, idxscan->indexstrategy, List *);
+			MUTATE(newidxscan->indexsubtype, idxscan->indexsubtype, List *);
 			// MUTATE(newidxscan->indexname, idxscan->indexname, char*);
 			newidxscan->fmtType = idxscan->fmtType;
 			return (Node *) newidxscan;
@@ -409,9 +413,13 @@ plan_tree_mutator(Node *node,
 
 			FLATCOPY(newidxscan, idxscan, MagmaIndexOnlyScan);
 			SCANMUTATE(newidxscan, idxscan);
+
+			MUTATE(newidxscan->indexqual, idxscan->indexqual, List *);
 			MUTATE(newidxscan->indexqualorig, idxscan->indexqualorig, List *);
 			MUTATE(newidxscan->uriList, idxscan->uriList, List *);
 			MUTATE(newidxscan->fmtOpts, idxscan->fmtOpts, List *);
+			MUTATE(newidxscan->indexstrategy, idxscan->indexstrategy, List *);
+			MUTATE(newidxscan->indexsubtype, idxscan->indexsubtype, List *);
 			// MUTATE(newidxscan->indexname, idxscan->indexname, char*);
 			newidxscan->fmtType = idxscan->fmtType;
 			return (Node *) newidxscan;
diff --git a/src/backend/cdb/dispatcher.c b/src/backend/cdb/dispatcher.c
index f29514a..517974b 100644
--- a/src/backend/cdb/dispatcher.c
+++ b/src/backend/cdb/dispatcher.c
@@ -1167,7 +1167,6 @@ static void dispatcher_serialize_common_plan(DispatchData *data, CommonPlanConte
                    new_executor_enable_partitioned_hashjoin_mode);
     univPlanAddGuc(ctx->univplan, "enable_external_sort",
                    new_executor_enable_external_sort_mode);
-    univPlanAddGuc(ctx->univplan, "new_scheduler", "off");
     univPlanAddGuc(ctx->univplan, "filter_pushdown", orc_enable_filter_pushdown);
     univPlanAddGuc(ctx->univplan, "magma_enable_shm", magma_enable_shm);
     sprintf(numberStrBuf, "%d", magma_shm_limit_per_block * 1024);
diff --git a/src/backend/cdb/dispatcher_new.c b/src/backend/cdb/dispatcher_new.c
index abf0c5c..77939ef 100644
--- a/src/backend/cdb/dispatcher_new.c
+++ b/src/backend/cdb/dispatcher_new.c
@@ -695,7 +695,6 @@ static void dispatcher_serialize_common_plan(MainDispatchData *data,
                    new_executor_enable_partitioned_hashjoin_mode);
     univPlanAddGuc(ctx->univplan, "enable_external_sort",
                    new_executor_enable_external_sort_mode);
-    univPlanAddGuc(ctx->univplan, "new_scheduler", "off");
     univPlanAddGuc(ctx->univplan, "filter_pushdown",
                    orc_enable_filter_pushdown);
     univPlanAddGuc(ctx->univplan, "magma_enable_shm", magma_enable_shm);
@@ -1515,6 +1514,15 @@ struct CdbDispatchResults *getDispatchResults(struct MyDispatchTask *task) {
   return ((CommonDispatchData *)task->refDispatchData)->results;
 }
 
+void checkQdError(void *dispatchData) {
+  if (Gp_role == GP_ROLE_DISPATCH) {
+    MainDispatchData *data = (MainDispatchData *)dispatchData;
+    if (data && mainDispatchHasError(data))
+      ereport(ERROR, (errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
+                      errmsg(CDB_MOTION_LOST_CONTACT_STRING)));
+  }
+}
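+
+/*
+ * Illustration only (not part of this change): checkQdError() turns a pending
+ * dispatch error into an ereport(ERROR) on the QD.  A hedged sketch of a
+ * possible calling pattern; pollForTuples() is a hypothetical helper.
+ *
+ *   for (;;)
+ *   {
+ *     checkQdError(estate->mainDispatchData);  // ereports on dispatch error
+ *     if (!pollForTuples())                    // hypothetical receive step
+ *       break;
+ *   }
+ */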
+
 const char *taskIdToString(struct MyDispatchTask *task) {
   StringInfoData str;
   initStringInfo(&str);
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 20fd6a4..dfb070a 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -157,6 +157,8 @@ static int calculate_virtual_segment_number(List* candidateRelations);
 
 static bool checkMultibytesDelimFound(const char *ptr, const char *delim);
 
+extern bool parseAndTransformAsGraph(ParseState *pstate, RangeVar *rangeVar);
+
 /* ==========================================================================
  * The follwing macros aid in major refactoring of data processing code (in
  * CopyFrom(+Dispatch)). We use macros because in some cases the code must be in
@@ -994,6 +996,8 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
 	/* Allocate workspace and zero all fields */
 	cstate = (CopyStateData *) palloc0(sizeof(CopyStateData));
 
+	parseAndTransformAsGraph(NULL, stmt->relation);
+
 	/* Extract options from the statement node tree */
 	foreach(option, stmt->options)
 	{
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 6d27cc0..256d683 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -570,13 +570,6 @@ ExplainOnePlan_internal(PlannedStmt *plannedstmt,
                                      LocallyExecutingSliceIndex(estate),
                                      es->showstatctx,
                                      mainDispatchGetSegNum(queryDesc->estate->mainDispatchData));
-        if (estate->scheduler_data) {
-          scheduler_receive_computenode_stats(queryDesc->estate->scheduler_data,
-                                              queryDesc->planstate);
-          cdbexplain_recvSchedulerExecStats(queryDesc->planstate,
-                                            queryDesc->estate->scheduler_data,
-                                            0, es->showstatctx);
-        }
 	} else {
 		CommonPlanContext ctx;
 		queryDesc->newPlanForceAuto = true;
@@ -793,8 +786,6 @@ ExplainOnePlan_internal(PlannedStmt *plannedstmt,
 	{
 	  if (estate->mainDispatchData)
 	    mainDispatchPrintStats(buf, estate->mainDispatchData);
-	  else if (estate->scheduler_data)
-	    scheduler_print_stats(estate->scheduler_data, buf);
 		appendStringInfo(buf, "Data locality statistics:\n");
 		if(plannedstmt->datalocalityInfo ==NULL){
 		  appendStringInfo(buf, "  no data locality information in this query\n");
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index fdd065e..887b67b 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -57,6 +57,7 @@
 #include "catalog/pg_resqueue.h"
 #include "catalog/pg_authid.h"
 #include "catalog/pg_type.h"
+#include "catalog/skylon_index.h"
 #include "cdb/cdbpartition.h"
 #include "commands/dbcommands.h"
 #include "commands/defrem.h"
@@ -126,7 +127,8 @@ bool CDBCreateIndex(IndexStmt *stmt, Oid relationOid, Oid indexOid)
 		if (fstotal)
 		{
 			target_segment_num = fstotal->totalfilesegs;
-			if (target_segment_num == 0)
+			int tuplecount = fstotal->totaltuples;
+			if (target_segment_num == 0 || tuplecount == 0)
 			{
 				elog(LOG, "CDBCreateIndex need not to dispatch create index statement, for not data in orc files.\n");
 				relation_close(rel, AccessShareLock);
@@ -204,10 +206,7 @@ bool CDBCreateIndex(IndexStmt *stmt, Oid relationOid, Oid indexOid)
 		if (HeapTupleIsValid(atttuple))
 		{
 			Form_pg_attribute tuple = (Form_pg_attribute) GETSTRUCT(atttuple);
-			if (!tuple->attnotnull)
-			{
-				stmt->columnsToRead = lappend_int(stmt->columnsToRead, tuple->attnum);
-			}
+			stmt->columnsToRead = lappend_int(stmt->columnsToRead, tuple->attnum);
 		}
 		caql_endscan(attcqCtx);
 	}
@@ -229,6 +228,13 @@ bool CDBCreateIndex(IndexStmt *stmt, Oid relationOid, Oid indexOid)
 	DispatchDataResult result;
 	mainDispatchStmtNode(stmt, NULL, resource, &result);
 	DropQueryContextInfo(stmt->contextdisp);
+	/* free resource */
+	if (resource)
+	{
+		FreeResource(resource);
+		resource = NULL;
+		SetActiveQueryResource(savedResource);
+	}
 	return true;
 }
 
@@ -236,6 +242,15 @@ void CDBDefineIndex(IndexStmt *stmt)
 {
 	Relation rel = relation_open(stmt->relationOid, NoLock);
 	/* 1. get orc file infos belong to this qe */
+
+	/* There might be a situation where the number
+	 * of QEs is greater than the number of index files.
+	 */
+	if (GetQEIndex() >= list_length(stmt->allidxinfos))
+	{
+		relation_close(rel, NoLock);
+		return;
+	}
 	NativeOrcIndexFile *idxs = (NativeOrcIndexFile *)(list_nth(stmt->allidxinfos, GetQEIndex()));
 	/* 2. call native orc index interface to build index data */
 	int keyCount = list_length(stmt->indexParams) - list_length(stmt->indexIncludingParams);
@@ -350,6 +365,30 @@ DefineIndex(Oid relationId,
 	          (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
 	              errmsg("included columns must not intersect with key columns")));
 
+	if(stmt->graphele) {
+	  int indexnum = list_length(stmt->graphIndexAttnum);
+	  int includenum = list_length(stmt->graphIncludeAttnum);
+	  int2 *attnums = palloc0(indexnum * sizeof(int2));
+	  int2 *includeattnums = palloc0(includenum * sizeof(int2));
+	  ListCell *cell;
+	  int i = 0;
+	  foreach(cell, stmt->graphIndexAttnum) {
+	    Value *num = (Value *) lfirst(cell);
+	    attnums[i] = (int2)num->val.ival;
+	    i++;
+	  }
+	  i = 0;
+    foreach(cell, stmt->graphIncludeAttnum) {
+      Value *num = (Value *) lfirst(cell);
+      includeattnums[i] = (int2)num->val.ival;
+      i++;
+    }
+	  char indexType = stmt->reverse ? 'r' : 'n';
+	  InsertSkylonIndexEntry(stmt->graphele->catalogname , stmt->graphele->schemaname,
+	                         stmt->graphele->relname, indexType, stmt->idxname,
+	                         attnums, indexnum, includeattnums, includenum);
+	}
+
 	/*
 	 * count key attributes in index
 	 */
@@ -1715,6 +1754,37 @@ RemoveIndex(RangeVar *relation, DropBehavior behavior)
 
 	indOid = RangeVarGetRelid(relation, false, false /*allowHcatalog*/);
 
+	Relation pgclassrel = heap_open(RelationRelationId, RowExclusiveLock);
+	cqContext cqctmp;
+	HeapTuple pgclasstup = caql_getfirst(
+	    caql_addrel(cqclr(&cqctmp), pgclassrel),
+	    cql("SELECT * FROM pg_class "
+	        " WHERE oid = :1 ",
+	        ObjectIdGetDatum(indOid)));
+	Form_pg_class pgclassForm = (Form_pg_class) GETSTRUCT(pgclasstup);
+	Relation pgnmrel = heap_open(NamespaceRelationId, RowExclusiveLock);
+	HeapTuple pgnmtup = caql_getfirst(
+	    caql_addrel(cqclr(&cqctmp), pgnmrel),
+	    cql("SELECT * FROM pg_namespace "
+	        " WHERE oid = :1 ",
+	        ObjectIdGetDatum(pgclassForm->relnamespace)));
+	Form_pg_namespace pgnmForm = (Form_pg_namespace) GETSTRUCT(pgnmtup);
+
+	Relation  skylon_index_rel;
+	cqContext cqc;
+	skylon_index_rel = heap_open(SkylonIndexRelationId, RowExclusiveLock);
+	caql_getcount(
+	    caql_addrel(cqclr(&cqc), skylon_index_rel),
+	    cql("DELETE FROM skylon_index "
+	        " WHERE indexname = :1 AND schemaname = :2",
+	        CStringGetDatum(relation->relname), NameGetDatum(&pgnmForm->nspname)));
+	heap_close(skylon_index_rel, RowExclusiveLock);
+
+	heap_close(pgnmrel, RowExclusiveLock);
+	heap_freetuple(pgnmtup);
+	heap_close(pgclassrel, RowExclusiveLock);
+	heap_freetuple(pgclasstup);
+
 	if (Gp_role == GP_ROLE_DISPATCH)
 	{
 		LockRelationOid(RelationRelationId, RowExclusiveLock);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index fd93525..1d0a767 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -79,6 +79,14 @@
 #include "catalog/pg_authid.h"
 #include "catalog/pg_partition.h"
 #include "catalog/pg_partition_rule.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
 #include "catalog/toasting.h"
 #include "cdb/cdbappendonlyam.h"
 #include "cdb/cdbparquetam.h"
@@ -245,6 +253,20 @@ typedef struct NewColumnValue
 	ExprState  *exprstate;		/* execution state */
 } NewColumnValue;
 
+extern Query *transformCreateExternalStmtImpl(ParseState *pstate,
+                                          CreateExternalStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after);
+
+extern Query *transformCreateStmtImpl(ParseState *pstate,
+                                      CreateStmt *stmt,
+                                      List **extras_before,
+                                      List **extras_after);
+
+extern List *transformAsGraphName(ParseState *pstate, RangeVar *rangeVar);
+
+extern bool parseAndTransformAsGraph(ParseState *pstate, RangeVar *rangeVar);
+
 static void truncate_check_rel(Relation rel);
 static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel);
 static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel, List *inhAttrNameList,
@@ -991,6 +1013,1076 @@ static Datum AddDefaultPageRowGroupSize(Datum relOptions, List *defList){
 	}
 }
 
+static Oid CreateInCatalog(Oid tablespace_id, Oid namespaceId, RangeVar *rel, int2 attnum, char relkind) {
+  CreateStmt *createStmt = makeNode(CreateStmt);
+  createStmt->base.relKind = relkind;
+  createStmt->base.relation = rel;
+  ParseState *pstate = make_parsestate(NULL);
+  pstate->p_next_resno = 1;
+  List *extras_before = NIL;
+  List *extras_after = NIL;
+  createStmt = transformCreateStmtImpl(pstate,
+                                       createStmt,
+                                       &extras_before,
+                                       &extras_after)->utilityStmt;
+  ItemPointerData persistentTid;
+  int64     persistentSerialNum;
+  Relation pg_class_desc = heap_open(RelationRelationId, RowExclusiveLock);
+  Oid relid = GetNewRelFileNode(tablespace_id, tablespace_id == GLOBALTABLESPACE_OID,
+                    pg_class_desc,
+                    relstorage_is_ao(pg_class_desc->rd_rel->relstorage));
+  Relation new_rel_desc = heap_create(rel->relname,
+                             namespaceId,
+                             tablespace_id,
+                             relid,
+                             BuildDescForRelation(createStmt->base.tableElts),
+                             InvalidOid,
+                             createStmt->base.relKind,
+                             RELSTORAGE_HEAP,
+                             tablespace_id == GLOBALTABLESPACE_OID,
+                             allowSystemTableModsDDL,
+                             false);
+  new_rel_desc->rd_rel->relnatts = attnum;
+  new_rel_desc->rd_rel->relkind = relkind;
+  InsertPgClassTuple(pg_class_desc, new_rel_desc, relid, (Datum) 0);
+
+  heap_close(new_rel_desc, NoLock);
+  heap_close(pg_class_desc, RowExclusiveLock);
+  return relid;
+}
+
+static void InsertAttribute(Oid relid, ColumnDef *colDef,int rank) {
+  Relation attrdesc = heap_open(AttributeRelationId, RowExclusiveLock);
+  cqContext cqc;
+  cqContext  *patCtx = caql_beginscan(
+      caql_addrel(cqclr(&cqc), attrdesc),
+      cql("INSERT INTO pg_attribute ",
+        NULL));
+
+  FormData_pg_attribute attributeD;
+  HeapTuple attributeTuple = heap_addheader(Natts_pg_attribute,
+                  false,
+                  ATTRIBUTE_TUPLE_SIZE,
+                  (void *) &attributeD);
+  HeapTuple typeTuple = typenameType(NULL, colDef->typname);
+  Form_pg_type tform = (Form_pg_type) GETSTRUCT(typeTuple);
+  Form_pg_attribute attribute = (Form_pg_attribute) GETSTRUCT(attributeTuple);
+  Oid typeOid = HeapTupleGetOid(typeTuple);
+  attribute->attrelid = relid;
+  namestrcpy(&(attribute->attname), colDef->colname);
+  attribute->atttypid = colDef->typname->typid;
+  attribute->attstattarget = -1;
+  attribute->attlen = tform->typlen;
+  attribute->attcacheoff = -1;
+  attribute->atttypmod = colDef->typname->typmod;
+  attribute->attnum = rank;
+  attribute->attbyval = tform->typbyval;
+  attribute->attndims = list_length(colDef->typname->arrayBounds);
+  attribute->attstorage = tform->typstorage;
+  attribute->attalign = tform->typalign;
+  attribute->attnotnull = colDef->is_not_null;
+  attribute->atthasdef = false;
+  attribute->attisdropped = false;
+  attribute->attislocal = colDef->is_local;
+  attribute->attinhcount = colDef->inhcount;
+
+  ReleaseType(typeTuple);
+
+  caql_insert(patCtx, attributeTuple);
+
+  caql_endscan(patCtx);
+  heap_close(attrdesc, RowExclusiveLock);
+}
+
+extern void
+DefineVlabel(CreateVlabelStmt *createVlStmt)
+{
+  /*
+   * Get tablespace, database, schema for the relation
+   */
+  RangeVar *rel = createVlStmt->relation;
+  // get tablespace name for the relation
+  Oid tablespace_id = (gp_upgrade_mode) ? DEFAULTTABLESPACE_OID : GetDefaultTablespace();
+  if (!OidIsValid(tablespace_id))
+  {
+    tablespace_id = get_database_dts(MyDatabaseId);
+  }
+
+  char *filespace_name = NULL;
+  GetFilespacePathForTablespace(tablespace_id, &filespace_name);
+
+  if (filespace_name) {
+    int len = strlen(filespace_name);
+    if ((len > 0) && (filespace_name[len - 1] == '/'))
+      filespace_name[len - 1] = '\0';
+  }
+
+  char *tablespace_name = get_tablespace_name(tablespace_id);
+
+  // get database name for the relation
+  char *database_name = rel->catalogname ? rel->catalogname : get_database_name(MyDatabaseId);
+
+  // get schema name for the relation
+  char *schema_name = get_namespace_name(RangeVarGetCreationNamespace(rel));
+
+  // get table name for the relation
+  char *table_name = rel->relname;
+
+  Oid namespaceId = get_namespace_oid(schema_name);
+
+  if (get_relname_relid(table_name, namespaceId) != InvalidOid)
+    ereport(ERROR,
+        (errcode(ERRCODE_DUPLICATE_TABLE),
+         errmsg("vertex \"%s\" already exists",
+                table_name),
+         errOmitLocation(true)));
+
+  Oid relationId = CreateInCatalog(tablespace_id, namespaceId, rel,
+                                   list_length(createVlStmt->tableElts), RELKIND_VIEW);
+
+  ObjectAddress depender = {VlabelRelationId, relationId, 0, };
+  ObjectAddress ref = {NamespaceRelationId, namespaceId, 0, };
+  recordDependencyOn(&depender, &ref, DEPENDENCY_NORMAL);
+
+  {
+  cqContext cqc;
+  Relation VlabelRelation = heap_open(VlabelRelationId, RowExclusiveLock);
+  HeapTuple vlabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), VlabelRelation),
+                                       cql("SELECT * FROM skylon_vlabel"
+      " WHERE vlabelname = :1 "
+      "AND schemaname = :2",
+      CStringGetDatum(rel->relname),
+      CStringGetDatum(schema_name)));
+  if(HeapTupleIsValid(vlabelTuple))
+    ereport(ERROR,
+      (errcode(ERRCODE_DUPLICATE_TABLE),
+       errmsg("vertex \"%s\" already exists in schema \"%s\"",
+              rel->relname, schema_name)));
+  heap_close(VlabelRelation, RowExclusiveLock);
+  }
+  if (filespace_name)
+    pfree(filespace_name);
+
+  List     *schema = createVlStmt->tableElts;
+  TupleDesc descriptor = BuildDescForRelation(schema);
+  InsertVlabelEntry(table_name, schema_name);
+  ListCell *cell;
+  int32 order = 1;
+  foreach(cell,schema) {
+    Node* node=lfirst(cell);
+    if(nodeTag(node) == T_ColumnDef) {
+      ColumnDef* def = (ColumnDef*)node;
+      char* colname = def->colname;
+      Oid typeid = def->typname->typid;
+      List *keys = createVlStmt->constraints?((Constraint*)lfirst(list_head(
+                   createVlStmt->constraints)))->keys : NULL;
+      ListCell *lc;
+      int32 primary = 0;
+      int32 tmp = 1;
+      foreach(lc, keys){
+        Value *key = (Value*)lfirst(lc);
+        if(strcmp(colname, key->val.str) == 0) {
+          primary = tmp;
+          break;
+        }
+        tmp++;
+      }
+      if(!keys && order==1)
+        primary = 1;
+      InsertVlabelAttrEntry(schema_name, createVlStmt->relation->relname, colname, typeid, primary, order);
+      InsertAttribute(relationId, def, order);
+      order++;
+    }
+  }
+}
+
+extern void
+DefineElabel(CreateElabelStmt *createElStmt)
+{
+  /*
+   * Get tablespace, database, schema for the relation
+   */
+  RangeVar *rel = createElStmt->relation;
+  // get tablespace name for the relation
+  Oid tablespace_id = (gp_upgrade_mode) ? DEFAULTTABLESPACE_OID : GetDefaultTablespace();
+  if (!OidIsValid(tablespace_id))
+  {
+    tablespace_id = get_database_dts(MyDatabaseId);
+  }
+
+  char *filespace_name = NULL;
+  GetFilespacePathForTablespace(tablespace_id, &filespace_name);
+
+  if (filespace_name) {
+    int len = strlen(filespace_name);
+    if ((len > 0) && (filespace_name[len - 1] == '/'))
+      filespace_name[len - 1] = '\0';
+  }
+
+  char *tablespace_name = get_tablespace_name(tablespace_id);
+
+  // get database name for the relation
+  char *database_name = rel->catalogname ? rel->catalogname : get_database_name(MyDatabaseId);
+
+  // get schema name for the relation
+  char *schema_name = get_namespace_name(RangeVarGetCreationNamespace(rel));
+
+  // get table name for the relation
+  char *table_name = rel->relname;
+
+  Oid namespaceId = get_namespace_oid(schema_name);
+
+  if (get_relname_relid(table_name, namespaceId) != InvalidOid)
+    ereport(ERROR,
+        (errcode(ERRCODE_DUPLICATE_TABLE),
+         errmsg("edge \"%s\" already exists",
+                table_name),
+         errOmitLocation(true)));
+
+  Oid relationId = CreateInCatalog(tablespace_id, namespaceId, rel,
+                                   list_length(createElStmt->tableElts), RELKIND_VIEW);
+
+  ObjectAddress depender = {ElabelRelationId, relationId, 0, };
+  ObjectAddress ref = {NamespaceRelationId, namespaceId, 0, };
+  recordDependencyOn(&depender, &ref, DEPENDENCY_NORMAL);
+
+  {
+  cqContext cqc;
+  Relation ElabelRelation = heap_open(ElabelRelationId, RowExclusiveLock);
+  HeapTuple elabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), ElabelRelation),
+                                       cql("SELECT * FROM skylon_elabel"
+      " WHERE elabelname = :1 "
+      "AND schemaname = :2",
+      CStringGetDatum(rel->relname),
+      CStringGetDatum(schema_name)));
+  if(HeapTupleIsValid(elabelTuple))
+    ereport(ERROR,
+      (errcode(ERRCODE_DUPLICATE_TABLE),
+       errmsg("edge \"%s\" already exists in schema \"%s\"",
+              rel->relname, schema_name)));
+  heap_close(ElabelRelation, RowExclusiveLock);
+  }
+  {
+  cqContext cqc;
+  Relation VlabelRelation = heap_open(VlabelRelationId, RowExclusiveLock);
+  HeapTuple vlabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), VlabelRelation),
+                                       cql("SELECT * FROM skylon_vlabel"
+      " WHERE vlabelname = :1 "
+      "AND schemaname = :2",
+      CStringGetDatum(createElStmt->fromVlabel->val.str),
+      CStringGetDatum(schema_name)));
+  if(!HeapTupleIsValid(vlabelTuple))
+    ereport(ERROR,
+      (errcode(ERRCODE_UNDEFINED_OBJECT),
+       errmsg("source vertex \"%s\" does not exist in schema \"%s\"",
+              createElStmt->fromVlabel->val.str, schema_name)));
+  heap_close(VlabelRelation, RowExclusiveLock);
+  }
+
+  {
+  cqContext cqc;
+  Relation VlabelRelation = heap_open(VlabelRelationId, RowExclusiveLock);
+  HeapTuple vlabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), VlabelRelation),
+                                       cql("SELECT * FROM skylon_vlabel"
+      " WHERE vlabelname = :1 "
+      "AND schemaname = :2",
+      CStringGetDatum(createElStmt->toVlabel->val.str),
+      CStringGetDatum(schema_name)));
+  if(!HeapTupleIsValid(vlabelTuple))
+    ereport(ERROR,
+      (errcode(ERRCODE_UNDEFINED_OBJECT),
+       errmsg("destination vertex \"%s\" does not exist in schema \"%s\"",
+              createElStmt->toVlabel->val.str, schema_name)));
+  heap_close(VlabelRelation, RowExclusiveLock);
+  }
+
+  if (filespace_name)
+    pfree(filespace_name);
+
+  List     *schema = createElStmt->tableElts;
+  TupleDesc descriptor = BuildDescForRelation(schema);
+  InsertElabelEntry(table_name, schema_name, createElStmt->fromVlabel->val.str, createElStmt->toVlabel->val.str);
+
+  ListCell *cell;
+  int32 order = 1;
+  foreach(cell,schema) {
+    Node* node=lfirst(cell);
+    if(nodeTag(node) == T_ColumnDef){
+      ColumnDef* def = (ColumnDef*)node;
+      char* colname = def->colname;
+      Oid typeid = def->typname->typid;
+      List *keys = createElStmt->constraints? ((Constraint*)lfirst(list_head(
+                   createElStmt->constraints)))->keys : NULL;
+      ListCell *lc;
+      int32 primary = 0;
+      int32 tmp = 1;
+      foreach(lc, keys){
+        Value *key = (Value*)lfirst(lc);
+        if(strcmp(colname, key->val.str) == 0) {
+          primary = tmp;
+          break;
+        }
+        tmp++;
+      }
+      InsertElabelAttrEntry(schema_name, createElStmt->relation->relname, colname, typeid, primary, order);
+      InsertAttribute(relationId, def, order);
+      order++;
+    }
+  }
+}
+
+char *graphVertexTableName(char *gname,char *vname)
+{
+  char extraname[] = "skylon_vertex_";
+  int len1 = strlen(gname);
+  int len2 = strlen(vname);
+  int len3 = strlen(extraname);
+  char * newname = palloc0(len1 + len2 + len3 + 2);
+  memcpy(newname, extraname, len3);
+  memcpy(newname + len3, gname, len1);
+  memcpy(newname + len3 + len1, "_", 1);
+  memcpy(newname + len3 + len1 + 1, vname, len2);
+  newname[len1 + len2 + len3 + 1] = '\0';
+  return newname;
+}
+
+char *graphEdgeTableName(char *gname,char *ename)
+{
+  char extraname[] = "skylon_edge_";
+  int len1 = strlen(gname);
+  int len2 = strlen(ename);
+  int len3 = strlen(extraname);
+  char * newname = palloc0(len1 + len2 + len3 + 2);
+  memcpy(newname, extraname, len3);
+  memcpy(newname + len3, gname, len1);
+  memcpy(newname + len3 + len1, "_", 1);
+  memcpy(newname + len3 + len1 + 1, ename, len2);
+  newname[len1 + len2 + len3 + 1] = '\0';
+  return newname;
+}
+
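
   [Editorial example] The two helpers above only concatenate a fixed prefix, the graph
   name, an underscore, and the label name, so the backing tables of a graph come out as
   "skylon_vertex_<graph>_<vlabel>" and "skylon_edge_<graph>_<elabel>". A minimal
   standalone sketch of the same scheme follows; the graph name "g" and labels
   "person"/"knows" are made up, and malloc/snprintf stand in for the server's palloc0
   only to keep the example self-contained.

   #include <stdio.h>
   #include <stdlib.h>
   #include <string.h>

   /* Build "<prefix><graph>_<label>", e.g. "skylon_vertex_g_person". */
   static char *backing_table_name(const char *prefix, const char *gname, const char *lname)
   {
       size_t len = strlen(prefix) + strlen(gname) + 1 + strlen(lname) + 1;
       char *name = malloc(len);
       snprintf(name, len, "%s%s_%s", prefix, gname, lname);
       return name;
   }

   int main(void)
   {
       char *v = backing_table_name("skylon_vertex_", "g", "person");
       char *e = backing_table_name("skylon_edge_", "g", "knows");
       printf("%s\n%s\n", v, e);   /* skylon_vertex_g_person / skylon_edge_g_knows */
       free(v);
       free(e);
       return 0;
   }
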
+static List * vertexPrimary(const char* schemaname, const char *vname) {
+  cqContext cqc;
+  Relation catalogRelation = heap_open(VlabelAttrRelationId, RowExclusiveLock);
+
+  int primaryNum = caql_getcount(
+            caql_addrel(cqclr(&cqc), catalogRelation),
+            cql("SELECT COUNT(*) FROM skylon_vlabel_attribute "
+              " WHERE vlabelname = :1 AND schemaname = :2 "
+                "AND primaryrank > :3",
+              CStringGetDatum(vname), CStringGetDatum(schemaname), Int32GetDatum(0)));
+  ColumnDef **colVec = (ColumnDef **)palloc0(sizeof(ColumnDef*)*primaryNum);
+
+  cqContext *pcqCtx = caql_beginscan(
+      caql_addrel(cqclr(&cqc), catalogRelation),
+      cql("SELECT * FROM skylon_vlabel_attribute "
+        " WHERE vlabelname = :1 AND schemaname = :2 "
+          "AND primaryrank > :3",
+          CStringGetDatum(vname), CStringGetDatum(schemaname), Int32GetDatum(0)));
+  HeapTuple attributeTuple;
+  List *newCols = NIL;
+  while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))){
+    Form_skylon_vlabel_attribute att = (Form_skylon_vlabel_attribute) GETSTRUCT(attributeTuple);
+    ColumnDef *column = makeNode(ColumnDef);
+    Value *attstr = makeString(pstrdup(NameStr(att->attrname)));
+    column->colname = attstr->val.str;
+    Type type = typeidType(att->attrtypid);
+    column->typname = SystemTypeName(typeTypeName(type));
+    column->constraints = NULL;
+    column->is_local = true;
+    column->encoding = NULL;
+    column->typname->typid = att->attrtypid;
+    ReleaseType(type);
+    colVec[att->primaryrank - 1] = column;
+  }
+  for(int i=0;i<primaryNum;i++) {
+    newCols = lappend(newCols, colVec[i]);
+  }
+  pfree(colVec);
+  caql_endscan(pcqCtx);
+  heap_close(catalogRelation, RowExclusiveLock);
+  return newCols;
+}
+
+static List * edgePrimary(const char *schemaname, const char *ename) {
+  cqContext cqc;
+  Relation catalogRelation = heap_open(ElabelAttrRelationId, RowExclusiveLock);
+
+  int primaryNum = caql_getcount(
+            caql_addrel(cqclr(&cqc), catalogRelation),
+            cql("SELECT COUNT(*) FROM skylon_elabel_attribute "
+              " WHERE elabelname = :1 AND schemaname = :2 "
+                "AND primaryrank > :3",
+              CStringGetDatum(ename), CStringGetDatum(schemaname), Int32GetDatum(0)));
+  ColumnDef **colVec = (ColumnDef **)palloc0(sizeof(ColumnDef*)*primaryNum);
+
+  cqContext *pcqCtx = caql_beginscan(
+      caql_addrel(cqclr(&cqc), catalogRelation),
+      cql("SELECT * FROM skylon_elabel_attribute "
+        " WHERE elabelname = :1 AND schemaname = :2 "
+          "AND primaryrank > :3",
+          CStringGetDatum(ename), CStringGetDatum(schemaname), Int32GetDatum(0)));
+  HeapTuple attributeTuple;
+  List *newCols = NIL;
+  while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))){
+    Form_skylon_elabel_attribute att = (Form_skylon_elabel_attribute) GETSTRUCT(attributeTuple);
+    ColumnDef *column = makeNode(ColumnDef);
+    Value *attstr = makeString(pstrdup(NameStr(att->attrname)));
+    column->colname = attstr->val.str;
+    Type type = typeidType(att->attrtypid);
+    column->typname = SystemTypeName(typeTypeName(type));
+    column->constraints = NULL;
+    column->is_local = true;
+    column->encoding = NULL;
+    column->typname->typid = att->attrtypid;
+    ReleaseType(type);
+    colVec[att->primaryrank - 1] = column;
+  }
+  for(int i=0;i<primaryNum;i++) {
+    newCols = lappend(newCols, colVec[i]);
+  }
+  pfree(colVec);
+  caql_endscan(pcqCtx);
+  heap_close(catalogRelation, RowExclusiveLock);
+  return newCols;
+}
+
+static int4 GetTypemod(Oid namespaceId, const char *relname, const char *attname) {
+  Oid relid = caql_getoid(
+      NULL,
+      cql("SELECT oid FROM pg_class "
+        " WHERE relname = :1 "
+        " AND relnamespace = :2 ",
+        CStringGetDatum(relname),
+        ObjectIdGetDatum(namespaceId)));
+
+  Relation attrelation = heap_open(AttributeRelationId, RowExclusiveLock);
+  cqContext cqc;
+  cqContext * pcqCtx = caql_addrel(cqclr(&cqc), attrelation);
+
+  HeapTuple atttup = caql_getfirst(
+      pcqCtx,
+      cql("SELECT * FROM pg_attribute "
+        " WHERE attrelid = :1 "
+        " AND attname = :2 ",
+        ObjectIdGetDatum(relid),
+        CStringGetDatum(attname)));
+
+
+  int4 typemod = ((Form_pg_attribute) GETSTRUCT(atttup))->atttypmod;
+
+
+  heap_freetuple(atttup);
+  heap_close(attrelation, RowExclusiveLock);
+  return typemod;
+}
+
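
   [Editorial example] GetTypemod is consulted only for NUMERICOID columns because NUMERIC
   is the one type here whose precision and scale are carried in atttypmod; dropping it
   would silently turn a numeric(10,2) label column into an unconstrained numeric on the
   generated graph tables. The sketch below shows the usual PostgreSQL packing of that
   typmod; this is an assumed convention for illustration, not code from this patch.

   #include <stdio.h>

   #define VARHDRSZ 4   /* normally comes from postgres.h */

   /* Conventional NUMERIC typmod: precision and scale packed together, offset by VARHDRSZ. */
   static int make_numeric_typmod(int precision, int scale)
   {
       return ((precision << 16) | (scale & 0xffff)) + VARHDRSZ;
   }

   int main(void)
   {
       int typmod = make_numeric_typmod(10, 2);   /* numeric(10,2) */
       printf("typmod=%d precision=%d scale=%d\n",
              typmod,
              ((typmod - VARHDRSZ) >> 16) & 0xffff,
              (typmod - VARHDRSZ) & 0xffff);
       return 0;
   }
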
+static bool IfMakePrimaryIndex(CreateGraphStmt *createGrStmt) {
+  Datum reloptions = transformRelOptions((Datum) 0, createGrStmt->options, true, false);
+  const char * default_keywords[] = {
+    "primaryindex"
+  };
+  char     *values[ARRAY_SIZE(default_keywords)];
+  parseRelOptions(reloptions, ARRAY_SIZE(default_keywords), default_keywords, values, false);
+  if(!values[0])
+    return false;
+  if((pg_strcasecmp(values[0], "true") == 0))
+    return true;
+  else
+    return false;
+}
+
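
   [Editorial example] IfMakePrimaryIndex simply looks for a boolean "primaryindex" entry
   in the graph's WITH options. A hypothetical, non-standalone fragment of how such a
   statement could reach the check is shown below; it assumes CreateGraphStmt is an
   ordinary parse node and that the grammar delivers the option as a DefElem, which this
   patch does not show. makeNode/makeDefElem/makeString/pstrdup are the constructors
   already used elsewhere in this file.

   /* Hypothetical: equivalent of CREATE GRAPH ... WITH (primaryindex = 'true'). */
   CreateGraphStmt *stmt = makeNode(CreateGraphStmt);
   stmt->options = list_make1(makeDefElem("primaryindex",
                                          (Node *) makeString(pstrdup("true"))));
   if (IfMakePrimaryIndex(stmt))
       elog(DEBUG1, "graph backing tables will get a unique primary-key btree index");
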
+extern void
+DefineGraph(CreateGraphStmt *createGrStmt)
+{
+  /*
+   * Get tablespace, database, schema for the relation
+   */
+  RangeVar *rel = createGrStmt->graph;
+
+  // get tablespace name for the relation
+  Oid tablespace_id = (gp_upgrade_mode) ? DEFAULTTABLESPACE_OID : GetDefaultTablespace();
+  if (!OidIsValid(tablespace_id))
+  {
+    tablespace_id = get_database_dts(MyDatabaseId);
+  }
+
+  char *filespace_name = NULL;
+  GetFilespacePathForTablespace(tablespace_id, &filespace_name);
+
+  if (filespace_name) {
+    int len = strlen(filespace_name);
+    if ((len > 0) && (filespace_name[len - 1] == '/'))
+      filespace_name[len - 1] = '\0';
+  }
+
+  char *tablespace_name = get_tablespace_name(tablespace_id);
+
+  // get database name for the relation
+  char *database_name = rel->catalogname ? rel->catalogname : get_database_name(MyDatabaseId);
+
+  // get schema name for the relation
+  char *schema_name = get_namespace_name(RangeVarGetCreationNamespace(rel));
+
+  Oid namespaceId = get_namespace_oid(schema_name);
+  // get table name for the relation
+  char *table_name = rel->relname;
+
+  if (get_relname_relid(table_name, namespaceId) != InvalidOid)
+    ereport(ERROR,
+        (errcode(ERRCODE_DUPLICATE_TABLE),
+         errmsg("graph \"%s\" already exists",
+                table_name),
+         errOmitLocation(true)));
+
+  Oid relationId = CreateInCatalog(tablespace_id, namespaceId, rel, 0, RELKIND_VIEW);
+
+  if (filespace_name)
+    pfree(filespace_name);
+
+  InsertGraphEntry(table_name, schema_name);
+
+  ObjectAddress depender = {GraphRelationId, relationId, 0, };
+  ObjectAddress ref = {NamespaceRelationId, namespaceId, 0, };
+  recordDependencyOn(&depender, &ref, DEPENDENCY_NORMAL);
+
+  bool haspk = IfMakePrimaryIndex(createGrStmt);
+  ListCell *cell;
+  foreach(cell, createGrStmt->vlabels) {
+    Value *value = lfirst(cell);
+    cqContext cqc;
+    Relation vr = heap_open(VlabelRelationId, RowExclusiveLock);
+    if (caql_getcount(
+            caql_addrel(cqclr(&cqc), vr),
+            cql("SELECT COUNT(*) FROM skylon_vlabel "
+              " WHERE vlabelName = :1 AND schemaname = :2",
+              CStringGetDatum(value->val.str), CStringGetDatum(schema_name))) == 0)
+          ereport(ERROR,
+              (errcode(ERRCODE_UNDEFINED_OBJECT),
+               errmsg("vertex \"%s\" does not exist",
+                      value->val.str),
+               errOmitLocation(true)));
+    heap_close(vr, RowExclusiveLock);
+
+    Relation vLabelAttRelation = heap_open(VlabelAttrRelationId, RowExclusiveLock);
+
+    IndexStmt *pkindex = NULL;
+    List *newCols = NIL;
+    char *primarykey = NULL;
+    List *distributedBy = NIL;
+    int2* indexkeys = NULL;
+    int indexcolnum = 0;
+    {
+      int primaryNum = caql_getcount(
+                caql_addrel(cqclr(&cqc), vLabelAttRelation),
+                cql("SELECT COUNT(*) FROM skylon_vlabel_attribute "
+                  " WHERE vlabelname = :1 AND schemaname = :2 "
+                    "AND primaryrank > :3",
+                  CStringGetDatum(value->val.str), CStringGetDatum(schema_name), Int32GetDatum(0)));
+      Value **distributedVec = (Value **)palloc0(sizeof(Value*)*primaryNum);
+
+      cqContext *pcqCtx = caql_beginscan(
+          caql_addrel(cqclr(&cqc), vLabelAttRelation),
+          cql("SELECT * FROM skylon_vlabel_attribute "
+            " WHERE vlabelname = :1 AND schemaname = :2 "
+              "AND primaryrank > :3",
+              CStringGetDatum(value->val.str), CStringGetDatum(schema_name), Int32GetDatum(0)));
+      HeapTuple attributeTuple;
+      while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))){
+        Form_skylon_vlabel_attribute att = (Form_skylon_vlabel_attribute) GETSTRUCT(attributeTuple);
+        distributedVec[att->primaryrank - 1] = makeString(pstrdup(NameStr(att->attrname)));
+        if(haspk) {
+          if(!indexkeys)
+            indexkeys = palloc0(primaryNum * sizeof(int2));
+          indexkeys[att->primaryrank - 1] = (int2)att->rank;
+          indexcolnum = primaryNum;
+        }
+      }
+      for(int i = 0; i < primaryNum; i++) {
+        if(haspk) {
+          if(!pkindex)
+            pkindex = makeNode(IndexStmt);
+          IndexElem* indexElem = makeNode(IndexElem);
+          indexElem->name = distributedVec[i]->val.str;
+          pkindex->indexParams = lappend(pkindex->indexParams, indexElem);
+        }
+        distributedBy = lappend(distributedBy, (Node *)distributedVec[i]);
+      }
+      caql_endscan(pcqCtx);
+      pfree(distributedVec);
+    }
+    int colNum = caql_getcount(
+              caql_addrel(cqclr(&cqc), vLabelAttRelation),
+              cql("SELECT COUNT(*) FROM skylon_vlabel_attribute "
+                " WHERE vlabelname = :1 AND schemaname = :2",
+                CStringGetDatum(value->val.str), CStringGetDatum(schema_name)));
+    ColumnDef **colVec = (ColumnDef **)palloc0(sizeof(ColumnDef*)*colNum);
+    cqContext *pcqCtx = caql_beginscan(
+        caql_addrel(cqclr(&cqc), vLabelAttRelation),
+        cql("SELECT * FROM skylon_vlabel_attribute "
+          " WHERE vlabelname = :1 AND schemaname = :2",
+          CStringGetDatum(value->val.str), CStringGetDatum(schema_name)));
+    HeapTuple attributeTuple;
+    while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))) {
+      Form_skylon_vlabel_attribute att = (Form_skylon_vlabel_attribute) GETSTRUCT(attributeTuple);
+      ColumnDef *column = makeNode(ColumnDef);
+      Value *attstr = makeString(pstrdup(NameStr(att->attrname)));
+      column->colname = attstr->val.str;
+      Type type = typeidType(att->attrtypid);
+      column->typname = SystemTypeName(typeTypeName(type));
+      column->constraints = NULL;
+      column->is_local = true;
+      column->encoding = NULL;
+      column->typname->typid = att->attrtypid;
+      if(column->typname->typid == NUMERICOID)
+        column->typname->typmod = GetTypemod(namespaceId, value->val.str, column->colname);
+      ReleaseType(type);
+      colVec[att->rank - 1] = column;
+    }
+    for(int i = 0; i < colNum; i++)
+    {
+      newCols = lappend(newCols, colVec[i]);
+    }
+    pfree(colVec);
+    caql_endscan(pcqCtx);
+    heap_close(vLabelAttRelation, RowExclusiveLock);
+    RangeVar *relationName = makeNode(RangeVar);
+    relationName->catalogname = database_name;
+    relationName->schemaname = schema_name;
+    relationName->relname = graphVertexTableName(table_name, value->val.str);
+    if(strcmp(createGrStmt->format,"magmaap") == 0)
+    {
+      CreateExternalStmt *externalStmt = makeNode(CreateExternalStmt);
+      externalStmt->base.relKind = RELKIND_RELATION;
+      externalStmt->base.relation = relationName;
+      externalStmt->base.tableElts = newCols;
+      externalStmt->base.oncommit = ONCOMMIT_NOOP;
+      externalStmt->base.distributedBy = distributedBy;
+      ExtTableTypeDesc *extDesc = makeNode(ExtTableTypeDesc);
+      extDesc->exttabletype = EXTTBL_TYPE_UNKNOWN;
+      externalStmt->exttypedesc = extDesc;
+      externalStmt->format = pstrdup(createGrStmt->format);
+      externalStmt->iswritable = TRUE;
+      if(pkindex) {
+        Constraint *cons = makeNode(Constraint);
+        cons->contype = CONSTR_PRIMARY;
+        ListCell *cell;
+        foreach(cell, pkindex->indexParams) {
+          IndexElem *ele = (IndexElem *)lfirst(cell);
+          cons->keys = lappend(cons->keys, makeString(ele->name));
+        }
+        externalStmt->base.tableElts = lappend(externalStmt->base.tableElts, cons);
+      }
+      ParseState *pstate = make_parsestate(NULL);
+      pstate->p_next_resno = 1;
+      List *extras_before = NIL;
+      List *extras_after = NIL;
+      externalStmt = transformCreateExternalStmtImpl(pstate,
+                                                   externalStmt,
+                                                &extras_before,
+                                                &extras_after)->utilityStmt;
+      DefineExternalRelation(externalStmt);
+    }
+    else
+    {
+      if(strcmp(createGrStmt->format,"orc") && strcmp(createGrStmt->format,"heap"))
+        ereport(ERROR,
+               (errcode(ERRCODE_UNDEFINED_OBJECT),
+               errmsg("unsupported format \"%s\"",
+                      createGrStmt->format)));
+      CreateStmt *createStmt = makeNode(CreateStmt);
+      createStmt->base.relKind = RELKIND_RELATION;
+      createStmt->base.relation = relationName;
+      createStmt->base.tableElts = newCols;
+      createStmt->base.oncommit = ONCOMMIT_NOOP;
+      createStmt->base.distributedBy = distributedBy;
+      if(strcmp(createGrStmt->format,"orc") == 0)
+        createStmt->base.options = lappend(lappend(list_make1(makeDefElem("appendonly", (Node *)makeString(pstrdup("true")))),
+                                           makeDefElem("OIDS", (Node *)makeString(pstrdup("FALSE")))),
+                                           makeDefElem("ORIENTATION", (Node *)makeString(pstrdup("orc"))));
+      else
+        createStmt->base.options = list_make1(makeDefElem("appendonly", (Node *)makeString(pstrdup("false"))));
+      if(strcmp(createGrStmt->format, "heap") == 0)
+        createStmt->base.tablespacename = "pg_default";
+      if(pkindex) {
+        Constraint *cons = makeNode(Constraint);
+        cons->contype = CONSTR_PRIMARY;
+        ListCell *cell;
+        foreach(cell, pkindex->indexParams) {
+          IndexElem *ele = (IndexElem *)lfirst(cell);
+          cons->keys = lappend(cons->keys, makeString(ele->name));
+        }
+        createStmt->base.tableElts = lappend(createStmt->base.tableElts, cons);
+      }
+      ParseState *pstate = make_parsestate(NULL);
+      pstate->p_next_resno = 1;
+      List *extras_before = NIL;
+      List *extras_after = NIL;
+      createStmt = transformCreateStmt(pstate,
+                                       createStmt,
+                                    &extras_before,
+                                    &extras_after)->utilityStmt;
+      ProcessUtility((Node *)createStmt, "", NULL, TRUE, NULL, NULL);
+    }
+    if(haspk) {
+      Oid eleid = caql_getoid_only(
+          NULL,
+          NULL,
+          cql("SELECT oid FROM pg_class "
+            " WHERE relname = :1 and relnamespace = :2",
+            CStringGetDatum(value->val.str),
+            ObjectIdGetDatum(namespaceId)));
+      char *classchar1 = (char*)palloc0(7 + VARHDRSZ);
+      char *classchar2 = (char*)palloc0(7 + VARHDRSZ);
+      pg_ltoa((int32)relationId, classchar1);
+      pg_ltoa((int32)eleid, classchar2);
+      char *indexname = palloc0(sizeof(char)*(strlen(classchar1) + strlen(classchar2) + 5 + 1));
+      sprintf(indexname, "index%s%s", classchar1, classchar2);
+      RangeVar *indexrel = makeNode(RangeVar);
+      indexrel->catalogname = database_name;
+      indexrel->schemaname = schema_name;
+      indexrel->relname = indexname;
+      Oid relid = caql_getoid_only(
+          NULL,
+          NULL,
+          cql("SELECT oid FROM pg_class "
+            " WHERE relname = :1 and relnamespace = :2",
+            CStringGetDatum(relationName->relname),
+            ObjectIdGetDatum(namespaceId)));
+      IndexStmt *indexstmt = makeNode(IndexStmt);
+      indexstmt->idxname = indexname;
+      indexstmt->accessMethod = "btree";
+      indexstmt->indexParams = pkindex->indexParams;
+      indexstmt->unique = true;
+      indexstmt->primary = true;
+      indexstmt->isconstraint = true;
+      DefineIndex(relid,
+            indexstmt->idxname,
+            InvalidOid,
+            indexstmt->accessMethod,
+            indexstmt->tableSpace,
+            indexstmt->indexParams,
+            (Expr *) indexstmt->whereClause,
+            indexstmt->rangetable,
+            indexstmt->options,
+            indexstmt->unique,
+            indexstmt->primary,
+            indexstmt->isconstraint,
+            false,
+            true,
+            false,
+            true,
+            indexstmt->concurrent,
+            false,
+            indexstmt);
+      InsertSkylonIndexEntry(schema_name , table_name,
+                             value->val.str, 'd', indexname,
+                             indexkeys, indexcolnum, NULL, 0);
+    }
+    InsertGraphVlabelEntry(schema_name, table_name, value->val.str, RangeVarGetRelid(relationName, false, false));
+  }
+
+  foreach(cell, createGrStmt->elabels) {
+    Value *value = lfirst(cell);
+    cqContext cqc;
+    Relation er = heap_open(ElabelRelationId, RowExclusiveLock);
+    if (caql_getcount(
+            caql_addrel(cqclr(&cqc), er),
+            cql("SELECT COUNT(*) FROM skylon_elabel "
+              " WHERE elabelName = :1 AND schemaname = :2",
+              CStringGetDatum(value->val.str), CStringGetDatum(schema_name))) == 0)
+          ereport(ERROR,
+              (errcode(ERRCODE_UNDEFINED_OBJECT),
+               errmsg("edge \"%s\" does not exist",
+                      value->val.str),
+               errOmitLocation(true)));
+    heap_close(er, RowExclusiveLock);
+    NameData name;
+    snprintf(NameStr(name), NAMEDATALEN, "%s", value->val.str);
+    Relation ElabelRelation = heap_open(ElabelRelationId, RowExclusiveLock);
+    HeapTuple elabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), ElabelRelation),
+                                         cql("SELECT * FROM skylon_elabel"
+        " WHERE elabelname = :1 AND schemaname = :2",
+        NameGetDatum(&name), CStringGetDatum(schema_name)));
+    Form_skylon_elabel elabel = (Form_skylon_elabel) GETSTRUCT(elabelTuple);
+
+    Relation ElabelAttrRelation = heap_open(ElabelAttrRelationId, RowExclusiveLock);
+
+    IndexStmt *pkindex = NULL;
+    List *newCols = NIL;
+    List *distributedBy = NIL;
+    int2* indexkeys = NULL;
+    int indexcolnum = 0;
+    {
+      List *cols=vertexPrimary(schema_name, elabel->fromvlabel.data);
+      ListCell *lc;
+      foreach(lc, cols) {
+        ColumnDef *col= (ColumnDef*)lfirst(lc);
+        ColumnDef *column = makeNode(ColumnDef);
+        column->colname = palloc0(sizeof(char)*(4 + 1 + strlen(col->colname)));
+        memcpy(column->colname, "src_", 4 * sizeof(char));
+        memcpy(column->colname + 4, col->colname, strlen(col->colname) * sizeof(char));
+        column->colname[4 + strlen(col->colname)] = '\0';
+        Type type = typeidType(col->typname->typid);
+        column->typname = SystemTypeName(typeTypeName(type));
+        column->typname->typid = col->typname->typid;
+        if(column->typname->typid == NUMERICOID)
+                column->typname->typmod = GetTypemod(namespaceId, elabel->fromvlabel.data, col->colname);
+        column->constraints = NULL;
+        column->is_local = true;
+        column->encoding = NULL;
+        ReleaseType(type);
+        newCols = lappend(newCols, column);
+        if(haspk) {
+          if(!pkindex)
+            pkindex = makeNode(IndexStmt);
+          IndexElem* indexElem = makeNode(IndexElem);
+          indexElem->name = column->colname;
+          pkindex->indexParams = lappend(pkindex->indexParams, indexElem);
+        }
+        distributedBy = lappend(distributedBy, (Node *)makeString(column->colname));
+      }
+    }
+    {
+      List *cols=vertexPrimary(schema_name, elabel->tovlabel.data);
+      ListCell *lc;
+      foreach(lc, cols) {
+        ColumnDef *col= (ColumnDef*)lfirst(lc);
+        ColumnDef *column = makeNode(ColumnDef);
+        column->colname = palloc0(sizeof(char)*(4 + 1 + strlen(col->colname)));
+        memcpy(column->colname, "dst_", 4 * sizeof(char));
+        memcpy(column->colname + 4, col->colname, strlen(col->colname) * sizeof(char));
+        column->colname[4 + strlen(col->colname)] = '\0';
+        Type type = typeidType(col->typname->typid);
+        column->typname = SystemTypeName(typeTypeName(type));
+        column->typname->typid = col->typname->typid;
+        if(column->typname->typid == NUMERICOID)
+          column->typname->typmod = GetTypemod(namespaceId, elabel->tovlabel.data, col->colname);
+        column->constraints = NULL;
+        column->is_local = true;
+        column->encoding = NULL;
+        ReleaseType(type);
+        newCols = lappend(newCols, column);
+        if(haspk) {
+          if(!pkindex)
+            pkindex = makeNode(IndexStmt);
+          IndexElem* indexElem = makeNode(IndexElem);
+          indexElem->name = column->colname;
+          pkindex->indexParams = lappend(pkindex->indexParams, indexElem);
+        }
+        distributedBy = lappend(distributedBy, (Node *)makeString(column->colname));
+      }
+    }
+    {
+      int primaryNum = caql_getcount(
+                caql_addrel(cqclr(&cqc), ElabelAttrRelation),
+                cql("SELECT COUNT(*) FROM skylon_elabel_attribute "
+                  " WHERE elabelname = :1 AND schemaname = :2 "
+                    "AND primaryrank > :3",
+                    CStringGetDatum(value->val.str), CStringGetDatum(schema_name), Int32GetDatum(0)));
+      Value **distributedVec = (Value **)palloc0(sizeof(Value*)*primaryNum);
+
+      cqContext *pcqCtx = caql_beginscan(
+          caql_addrel(cqclr(&cqc), ElabelAttrRelation),
+          cql("SELECT * FROM skylon_elabel_attribute "
+            " WHERE elabelname = :1 AND schemaname = :2 "
+              "AND primaryrank > :3",
+              CStringGetDatum(value->val.str), CStringGetDatum(schema_name), Int32GetDatum(0)));
+      HeapTuple attributeTuple;
+      while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))){
+        Form_skylon_elabel_attribute att = (Form_skylon_elabel_attribute) GETSTRUCT(attributeTuple);
+        distributedVec[att->primaryrank - 1] = makeString(pstrdup(NameStr(att->attrname)));
+        if(haspk) {
+          if(!indexkeys) {
+            indexkeys = palloc0(primaryNum * sizeof(int2));
+            indexcolnum = primaryNum;
+          }
+          indexkeys[att->primaryrank - 1] = (int2) att->rank;
+        }
+      }
+      for(int i = 0; i < primaryNum; i++) {
+        if(haspk) {
+          if(!pkindex)
+            pkindex = makeNode(IndexStmt);
+          IndexElem* indexElem = makeNode(IndexElem);
+          indexElem->name = distributedVec[i]->val.str;
+          pkindex->indexParams = lappend(pkindex->indexParams, indexElem);
+        }
+        distributedBy = lappend(distributedBy, (Node *)distributedVec[i]);
+      }
+      pfree(distributedVec);
+      caql_endscan(pcqCtx);
+    }
+    {
+      int colNum = caql_getcount(
+                caql_addrel(cqclr(&cqc), ElabelAttrRelation),
+                cql("SELECT COUNT(*) FROM skylon_elabel_attribute "
+                  " WHERE elabelname = :1 AND schemaname = :2",
+                  CStringGetDatum(value->val.str), CStringGetDatum(schema_name)));
+      ColumnDef **colVec = (ColumnDef **)palloc0(sizeof(ColumnDef*)*colNum);
+      cqContext *pcqCtx = caql_beginscan(
+          caql_addrel(cqclr(&cqc), ElabelAttrRelation),
+          cql("SELECT * FROM skylon_elabel_attribute "
+            " WHERE elabelname = :1 AND schemaname = :2",
+            CStringGetDatum(value->val.str), CStringGetDatum(schema_name)));
+      HeapTuple attributeTuple;
+      while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))) {
+        Form_skylon_elabel_attribute att = (Form_skylon_elabel_attribute) GETSTRUCT(attributeTuple);
+        ColumnDef *column = makeNode(ColumnDef);
+        Value *attstr = makeString(pstrdup(NameStr(att->attrname)));
+        column->colname = attstr->val.str;
+        Type type = typeidType(att->attrtypid);
+        column->typname = SystemTypeName(typeTypeName(type));
+        column->constraints = NULL;
+        column->is_local = true;
+        column->encoding = NULL;
+        column->typname->typid = att->attrtypid;
+        if(column->typname->typid == NUMERICOID)
+          column->typname->typmod = GetTypemod(namespaceId, value->val.str, column->colname);
+        ReleaseType(type);
+        colVec[att->rank - 1] = column;
+      }
+      for(int i = 0; i < colNum; i++) {
+        newCols = lappend(newCols, colVec[i]);
+      }
+      pfree(colVec);
+      caql_endscan(pcqCtx);
+    }
+    heap_close(ElabelRelation, RowExclusiveLock);
+    heap_close(ElabelAttrRelation, RowExclusiveLock);
+
+    RangeVar *relationName = makeNode(RangeVar);
+    relationName->catalogname = database_name;
+    relationName->schemaname = schema_name;
+    relationName->relname = graphEdgeTableName(table_name, value->val.str);
+    if(strcmp(createGrStmt->format, "magmaap") == 0)
+    {
+      CreateExternalStmt *externalStmt = makeNode(CreateExternalStmt);
+      externalStmt->base.relKind = RELKIND_RELATION;
+      externalStmt->base.relation = relationName;
+      externalStmt->base.tableElts = newCols;
+      externalStmt->base.oncommit = ONCOMMIT_NOOP;
+      externalStmt->base.distributedBy = distributedBy;
+      ExtTableTypeDesc *extDesc = makeNode(ExtTableTypeDesc);
+      extDesc->exttabletype = EXTTBL_TYPE_UNKNOWN;
+      externalStmt->exttypedesc = extDesc;
+      externalStmt->format = pstrdup(createGrStmt->format);
+      externalStmt->iswritable = TRUE;
+      if(pkindex) {
+        Constraint *cons = makeNode(Constraint);
+        cons->contype = CONSTR_PRIMARY;
+        ListCell *cell;
+        foreach(cell, pkindex->indexParams) {
+          IndexElem *ele = (IndexElem *)lfirst(cell);
+          cons->keys = lappend(cons->keys, makeString(ele->name));
+        }
+        externalStmt->base.tableElts = lappend(externalStmt->base.tableElts, cons);
+      }
+      ParseState *pstate = make_parsestate(NULL);
+      pstate->p_next_resno = 1;
+      List *extras_before = NIL;
+      List *extras_after = NIL;
+      externalStmt = transformCreateExternalStmtImpl(pstate,
+                                                   externalStmt,
+                                                &extras_before,
+                                                &extras_after)->utilityStmt;
+      DefineExternalRelation(externalStmt);
+    }
+    else
+    {
+      CreateStmt *createStmt = makeNode(CreateStmt);
+      createStmt->base.relKind = RELKIND_RELATION;
+      createStmt->base.relation = relationName;
+      createStmt->base.tableElts = newCols;
+      createStmt->base.oncommit = ONCOMMIT_NOOP;
+      createStmt->base.distributedBy = distributedBy;
+      if(strcmp(createGrStmt->format,"orc") == 0)
+        createStmt->base.options = lappend(lappend(list_make1(makeDefElem("appendonly", (Node *)makeString(pstrdup("true")))),
+                                           makeDefElem("OIDS", (Node *)makeString(pstrdup("FALSE")))),
+                                           makeDefElem("ORIENTATION", (Node *)makeString(pstrdup("orc"))));
+      else
+        createStmt->base.options = list_make1(makeDefElem("appendonly", (Node *)makeString(pstrdup("false"))));
+      if(strcmp(createGrStmt->format, "heap") == 0)
+        createStmt->base.tablespacename = "pg_default";
+      ParseState *pstate = make_parsestate(NULL);
+      pstate->p_next_resno = 1;
+      List *extras_before = NIL;
+      List *extras_after = NIL;
+      if(pkindex) {
+        Constraint *cons = makeNode(Constraint);
+        cons->contype = CONSTR_PRIMARY;
+        ListCell *cell;
+        foreach(cell, pkindex->indexParams) {
+          IndexElem *ele = (IndexElem *)lfirst(cell);
+          cons->keys = lappend(cons->keys, makeString(ele->name));
+        }
+        createStmt->base.tableElts = lappend(createStmt->base.tableElts, cons);
+      }
+      createStmt = transformCreateStmt(pstate,
+                                       createStmt,
+                                    &extras_before,
+                                    &extras_after)->utilityStmt;
+      ProcessUtility((Node *)createStmt, "", NULL, TRUE, NULL, NULL);
+    }
+    if(haspk) {
+      Oid eleid = caql_getoid_only(
+          NULL,
+          NULL,
+          cql("SELECT oid FROM pg_class "
+            " WHERE relname = :1 and relnamespace = :2",
+            CStringGetDatum(value->val.str),
+            ObjectIdGetDatum(namespaceId)));
+      char *classchar1 = (char*)palloc0(7 + VARHDRSZ);
+      char *classchar2 = (char*)palloc0(7 + VARHDRSZ);
+      pg_ltoa((int32)relationId, classchar1);
+      pg_ltoa((int32)eleid, classchar2);
+      char *indexname = palloc0(sizeof(char)*(strlen(classchar1) + strlen(classchar2) + 5 + 1));
+      sprintf(indexname, "index%s%s", classchar1, classchar2);
+      RangeVar *indexrel = makeNode(RangeVar);
+      indexrel->catalogname = database_name;
+      indexrel->schemaname = schema_name;
+      indexrel->relname = indexname;
+      Oid relid = caql_getoid_only(
+          NULL,
+          NULL,
+          cql("SELECT oid FROM pg_class "
+            " WHERE relname = :1 and relnamespace = :2",
+            CStringGetDatum(relationName->relname),
+            ObjectIdGetDatum(namespaceId)));
+      IndexStmt *indexstmt = makeNode(IndexStmt);
+      indexstmt->idxname = indexname;
+      indexstmt->accessMethod = "btree";
+      indexstmt->indexParams = pkindex->indexParams;
+      indexstmt->unique = true;
+      indexstmt->primary = true;
+      indexstmt->isconstraint = true;
+      DefineIndex(relid,
+            indexstmt->idxname,
+            InvalidOid,
+            indexstmt->accessMethod,
+            indexstmt->tableSpace,
+            indexstmt->indexParams,
+            (Expr *) indexstmt->whereClause,
+            indexstmt->rangetable,
+            indexstmt->options,
+            indexstmt->unique,
+            indexstmt->primary,
+            indexstmt->isconstraint,
+            false,
+            true,
+            false,
+            true,
+            indexstmt->concurrent,
+            false,
+            indexstmt);
+      InsertSkylonIndexEntry(schema_name , table_name,
+                             value->val.str, 'd', indexname,
+                             indexkeys, indexcolnum, NULL, 0);
+    }
+    InsertGraphElabelEntry(schema_name, table_name, value->val.str, RangeVarGetRelid(relationName, false, false));
+  }
+}
+
 /* ----------------------------------------------------------------
 *		DefineExternalRelation
 *				Creates a new external relation.
@@ -2063,6 +3155,324 @@ MetaTrackValidKindNsp(Form_pg_class rd_rel)
 			&& (!(isAnyTempNamespace(nsp))));
 }
 
+void RemoveVlabelByOid(Oid relid) {
+  RangeVar *rel = RelidGetRangeVar(relid);
+  RemoveVlabel(rel, false);
+}
+
+void
+RemoveVlabel(RangeVar *rel, bool missing_ok) {
+  // get database name for the relation
+  char *database_name = rel->catalogname ? rel->catalogname : get_database_name(MyDatabaseId);
+
+  // get schema name for the relation
+  char *schema_name = get_namespace_name(RangeVarGetCreationNamespace(rel));
+  {
+    Relation  skylon_graph_vlabel_rel;
+    cqContext cqc;
+
+    skylon_graph_vlabel_rel = heap_open(GraphVlabelRelationId, RowExclusiveLock);
+
+    if (0 < caql_getcount(
+          caql_addrel(cqclr(&cqc), skylon_graph_vlabel_rel),
+          cql("SELECT COUNT(*) FROM skylon_graph_vlabel "
+            " WHERE vlabelname = :1 AND schemaname = :2",
+            CStringGetDatum(rel->relname), CStringGetDatum(schema_name))))
+    {
+      ereport(ERROR,
+          (errcode(ERRCODE_UNDEFINED_OBJECT),
+           errmsg("vlabel \"%s\" is still referenced by one or more graphs",
+                  rel->relname)));
+    }
+    heap_close(skylon_graph_vlabel_rel, RowExclusiveLock);
+  }
+
+  {
+    Relation  skylon_vlabel_rel;
+    cqContext cqc;
+
+    skylon_vlabel_rel = heap_open(VlabelRelationId, RowExclusiveLock);
+
+    if (0 == caql_getcount(
+          caql_addrel(cqclr(&cqc), skylon_vlabel_rel),
+          cql("DELETE FROM skylon_vlabel "
+            " WHERE vlabelname = :1 AND schemaname = :2",
+            CStringGetDatum(rel->relname), CStringGetDatum(schema_name))))
+    {
+      if(!missing_ok)
+        ereport(ERROR,
+            (errcode(ERRCODE_UNDEFINED_OBJECT),
+             errmsg("vlabel object name \"%s\" does not exist in schema %s",
+                    rel->relname, schema_name)));
+      else
+        ereport(NOTICE,
+          (errcode(ERRCODE_UNDEFINED_OBJECT),
+           errmsg("vlabel object name \"%s\" does not exist in schema %s, skipping",
+                  rel->relname, schema_name),
+                     errOmitLocation(true)));
+      heap_close(skylon_vlabel_rel, NoLock);
+      return;
+    }
+    heap_close(skylon_vlabel_rel, RowExclusiveLock);
+  }
+  {
+    Relation  skylon_vlabel_attribute_rel;
+    cqContext cqc;
+
+    skylon_vlabel_attribute_rel = heap_open(VlabelAttrRelationId, RowExclusiveLock);
+
+    caql_getcount(
+              caql_addrel(cqclr(&cqc), skylon_vlabel_attribute_rel),
+              cql("DELETE FROM skylon_vlabel_attribute "
+                " WHERE vlabelname = :1 AND schemaname = :2",
+                CStringGetDatum(rel->relname), CStringGetDatum(schema_name)));
+    heap_close(skylon_vlabel_attribute_rel, NoLock);
+  }
+  {
+    RangeVar   *vlabel = makeRangeVar(NULL, NULL, NULL, -1);
+    vlabel->catalogname = database_name;
+    vlabel->schemaname = schema_name;
+    vlabel->relname = rel->relname;
+    Oid vid = RangeVarGetRelid(vlabel, true, false /*allowHcatalog*/);
+    DeleteRelationTuple(vid);
+    deleteDependencyRecordsFor(VlabelRelationId, vid);
+  }
+}
+
+void RemoveElabelByOid(Oid relid) {
+  RangeVar *rel = RelidGetRangeVar(relid);
+  RemoveElabel(rel, false);
+}
+
+void
+RemoveElabel(RangeVar *rel, bool missing_ok) {
+  // get database name for the relation
+  char *database_name = rel->catalogname ? rel->catalogname : get_database_name(MyDatabaseId);
+
+  // get schema name for the relation
+  char *schema_name = get_namespace_name(RangeVarGetCreationNamespace(rel));
+  {
+    Relation  skylon_graph_elabel_rel;
+    cqContext cqc;
+
+    skylon_graph_elabel_rel = heap_open(GraphElabelRelationId, RowExclusiveLock);
+
+    if (0 < caql_getcount(
+          caql_addrel(cqclr(&cqc), skylon_graph_elabel_rel),
+          cql("SELECT COUNT(*) FROM skylon_graph_elabel "
+            " WHERE elabelname = :1 AND schemaname = :2",
+            CStringGetDatum(rel->relname), CStringGetDatum(schema_name))))
+    {
+      ereport(ERROR,
+          (errcode(ERRCODE_UNDEFINED_OBJECT),
+           errmsg("elabel \"%s\" is still referenced by one or more graphs",
+                  rel->relname)));
+    }
+    heap_close(skylon_graph_elabel_rel, RowExclusiveLock);
+  }
+  {
+    Relation  skylon_elabel_rel;
+    cqContext cqc;
+
+    skylon_elabel_rel = heap_open(ElabelRelationId, RowExclusiveLock);
+
+    if (0 == caql_getcount(
+          caql_addrel(cqclr(&cqc), skylon_elabel_rel),
+          cql("DELETE FROM skylon_elabel "
+            " WHERE elabelname = :1 AND schemaname = :2",
+            CStringGetDatum(rel->relname), CStringGetDatum(schema_name))))
+    {
+      if(!missing_ok)
+        ereport(ERROR,
+            (errcode(ERRCODE_UNDEFINED_OBJECT),
+             errmsg("elabel object name \"%s\" does not exist in schema %s",
+                    rel->relname, schema_name)));
+      else
+        ereport(NOTICE,
+          (errcode(ERRCODE_UNDEFINED_OBJECT),
+           errmsg("elabel object name \"%s\" does not exist in schema %s, skipping",
+                  rel->relname, schema_name),
+                     errOmitLocation(true)));
+      heap_close(skylon_elabel_rel, RowExclusiveLock);
+      return;
+    }
+    heap_close(skylon_elabel_rel, RowExclusiveLock);
+  }
+  {
+    Relation  skylon_elabel_attribute_rel;
+    cqContext cqc;
+
+    skylon_elabel_attribute_rel = heap_open(ElabelAttrRelationId, RowExclusiveLock);
+
+    caql_getcount(
+              caql_addrel(cqclr(&cqc), skylon_elabel_attribute_rel),
+              cql("DELETE FROM skylon_elabel_attribute "
+                " WHERE elabelName = :1 AND schemaname = :2",
+                CStringGetDatum(rel->relname), CStringGetDatum(schema_name)));
+    heap_close(skylon_elabel_attribute_rel, NoLock);
+  }
+  {
+    RangeVar   *elabel = makeRangeVar(NULL, NULL, NULL, -1);
+    elabel->catalogname = database_name;
+    elabel->schemaname = schema_name;
+    elabel->relname = rel->relname;
+    Oid eid = RangeVarGetRelid(elabel, true, false /*allowHcatalog*/);
+    DeleteRelationTuple(eid);
+    deleteDependencyRecordsFor(ElabelRelationId, eid);
+  }
+}
+
+void RemoveGraphByOid(Oid relid, bool ifRemoveTable) {
+  RangeVar *rel = RelidGetRangeVar(relid);
+  RemoveGraph(rel, false, ifRemoveTable);
+}
+
+void RemoveGraph(RangeVar *rel, bool missing_ok, bool ifRemoveTable) {
+  // get database name for the relation
+  char *database_name = rel->catalogname ? rel->catalogname : get_database_name(MyDatabaseId);
+
+  // get schema name for the relation
+  char *schema_name = get_namespace_name(RangeVarGetCreationNamespace(rel));
+
+  {
+    Relation  skylon_graph_rel;
+    cqContext cqc;
+
+    skylon_graph_rel = heap_open(GraphRelationId, RowExclusiveLock);
+
+    if (0 == caql_getcount(
+          caql_addrel(cqclr(&cqc), skylon_graph_rel),
+          cql("DELETE FROM skylon_graph "
+            " WHERE graphname = :1 AND schemaname = :2",
+            CStringGetDatum(rel->relname), CStringGetDatum(schema_name))))
+    {
+      if(!missing_ok)
+        ereport(ERROR,
+            (errcode(ERRCODE_UNDEFINED_OBJECT),
+             errmsg("graph object name \"%s\" does not exist in schema %s",
+                    rel->relname, schema_name)));
+      else
+        ereport(NOTICE,
+          (errcode(ERRCODE_UNDEFINED_OBJECT),
+           errmsg("graph object name \"%s\" does not exist in schema %s, skipping",
+                  rel->relname, schema_name),
+                     errOmitLocation(true)));
+      heap_close(skylon_graph_rel, RowExclusiveLock);
+      return;
+    }
+    heap_close(skylon_graph_rel, RowExclusiveLock);
+  }
+
+  if(ifRemoveTable) {
+    Relation  skylon_graph_vlabel_rel;
+    cqContext cqc;
+
+    skylon_graph_vlabel_rel = heap_open(GraphVlabelRelationId, RowExclusiveLock);
+    cqContext *pcqCtx = caql_beginscan(
+        caql_addrel(cqclr(&cqc), skylon_graph_vlabel_rel),
+        cql("SELECT * FROM skylon_graph_vlabel "
+                        " WHERE graphname = :1 AND schemaname = :2",
+                        CStringGetDatum(rel->relname), CStringGetDatum(schema_name)));
+    HeapTuple vlabelTuple = NULL;
+    while (HeapTupleIsValid(vlabelTuple = caql_getnext(pcqCtx))) {
+      Form_skylon_graph_vlabel tuple = (Form_skylon_graph_vlabel) GETSTRUCT(vlabelTuple);
+      char *vname = pstrdup(NameStr(tuple->vlabelname));
+      DropStmt *dropStmt = makeNode(DropStmt);
+      dropStmt->removeType = OBJECT_VLABEL;
+      dropStmt->missing_ok = TRUE;
+      dropStmt->objects = NIL;
+      dropStmt->behavior = DROP_RESTRICT;
+      RangeVar   *vlabel = makeRangeVar(NULL, NULL, NULL, -1);
+      vlabel->catalogname = database_name;
+      vlabel->schemaname = schema_name;
+      vlabel->relname = graphVertexTableName(rel->relname, vname);
+      if (OidIsValid(RangeVarGetRelid(vlabel, true, false)))
+      {
+        RemoveRelation(vlabel, dropStmt->behavior, dropStmt, RELKIND_RELATION);
+      }
+    }
+    caql_endscan(pcqCtx);
+    heap_close(skylon_graph_vlabel_rel, RowExclusiveLock);
+  }
+  if(ifRemoveTable) {
+    Relation  skylon_graph_elabel_rel;
+    cqContext cqc;
+
+    skylon_graph_elabel_rel = heap_open(GraphElabelRelationId, RowExclusiveLock);
+    cqContext *pcqCtx = caql_beginscan(
+        caql_addrel(cqclr(&cqc), skylon_graph_elabel_rel),
+        cql("SELECT * FROM skylon_graph_elabel "
+                        " WHERE graphname = :1 AND schemaname = :2",
+                        CStringGetDatum(rel->relname), CStringGetDatum(schema_name)));
+    HeapTuple elabelTuple = NULL;
+    while (HeapTupleIsValid(elabelTuple = caql_getnext(pcqCtx))) {
+      Form_skylon_graph_elabel tuple = (Form_skylon_graph_elabel) GETSTRUCT(elabelTuple);
+      char *ename = pstrdup(NameStr(tuple->elabelname));
+      DropStmt *dropStmt = makeNode(DropStmt);
+      dropStmt->removeType = OBJECT_VLABEL;
+      dropStmt->missing_ok = TRUE;
+      dropStmt->objects = NIL;
+      dropStmt->behavior = DROP_RESTRICT;
+      RangeVar   *elabel = makeRangeVar(NULL, NULL, NULL, -1);
+      elabel->catalogname = database_name;
+      elabel->schemaname = schema_name;
+      elabel->relname = graphEdgeTableName(rel->relname, ename);
+      if (OidIsValid(RangeVarGetRelid(elabel, true, false)))
+      {
+        RemoveRelation(elabel, dropStmt->behavior, dropStmt, RELKIND_RELATION);
+      }
+    }
+    caql_endscan(pcqCtx);
+    heap_close(skylon_graph_elabel_rel, RowExclusiveLock);
+  }
+  {
+    Relation  skylon_graph_vlabel_rel;
+    cqContext cqc;
+
+    skylon_graph_vlabel_rel = heap_open(GraphVlabelRelationId, RowExclusiveLock);
+
+    caql_getcount(
+              caql_addrel(cqclr(&cqc), skylon_graph_vlabel_rel),
+              cql("DELETE FROM skylon_graph_vlabel "
+                " WHERE graphname = :1 AND schemaname = :2",
+                CStringGetDatum(rel->relname), CStringGetDatum(schema_name)));
+    heap_close(skylon_graph_vlabel_rel, RowExclusiveLock);
+  }
+  {
+    Relation  skylon_graph_elabel_rel;
+    cqContext cqc;
+
+    skylon_graph_elabel_rel = heap_open(GraphElabelRelationId, RowExclusiveLock);
+
+    caql_getcount(
+              caql_addrel(cqclr(&cqc), skylon_graph_elabel_rel),
+              cql("DELETE FROM skylon_graph_elabel "
+                " WHERE graphname = :1 AND schemaname = :2",
+                CStringGetDatum(rel->relname), CStringGetDatum(schema_name)));
+    heap_close(skylon_graph_elabel_rel, RowExclusiveLock);
+  }
+  {
+    RangeVar   *graph = makeRangeVar(NULL, NULL, NULL, -1);
+    graph->catalogname = database_name;
+    graph->schemaname = schema_name;
+    graph->relname = rel->relname;
+    Oid graphid = RangeVarGetRelid(graph, true, false /*allowHcatalog*/);
+    DeleteRelationTuple(graphid);
+    deleteDependencyRecordsFor(GraphRelationId, graphid);
+  }
+  {
+    Relation  skylon_index_rel;
+    cqContext cqc;
+    skylon_index_rel = heap_open(SkylonIndexRelationId, RowExclusiveLock);
+    caql_getcount(
+              caql_addrel(cqclr(&cqc), skylon_index_rel),
+              cql("DELETE FROM skylon_index "
+                " WHERE graphname = :1 AND schemaname = :2",
+                CStringGetDatum(rel->relname), CStringGetDatum(schema_name)));
+    heap_close(skylon_index_rel, RowExclusiveLock);
+  }
+}
+
 /*
  * RemoveRelation
  *		Deletes a relation.
@@ -2286,11 +3696,13 @@ ExecuteTruncate(TruncateStmt *stmt)
     int partcheck = 2;
 	List *partList = NIL;
 
+
 	/*
 	 * Open, exclusive-lock, and check all the explicitly-specified relations
 	 *
 	 * Check if table has partitions and add them too
 	 */
+	bool ifReturn = false;
 	while (partcheck)
 	{
 		foreach(cell, stmt->relations)
@@ -2299,6 +3711,19 @@ ExecuteTruncate(TruncateStmt *stmt)
 			Relation	rel;
 			PartitionNode *pNode;
 
+			List *labellist = transformAsGraphName(NULL, rv);
+			if(labellist) {
+			  TruncateStmt *truncate = makeNode(TruncateStmt);
+			  truncate->relations = labellist;
+			  truncate->behavior = stmt->behavior;
+			  ExecuteTruncate(truncate);
+			  ifReturn = true;
+			  continue;
+			}
+			else {
+			  parseAndTransformAsGraph(NULL, rv);
+			}
+
 		  rel = heap_openrv(rv, AccessExclusiveLock);
 			truncate_check_rel(rel);
 
@@ -2321,6 +3746,9 @@ ExecuteTruncate(TruncateStmt *stmt)
 			}
 			heap_close(rel, NoLock);
 		}
+		// If truncating a whole graph, return now, because its vertices and edges have already been truncated above.
+		if(ifReturn)
+		  return;
 
 		partcheck--;
 
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 21a8618..a3023e2 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -264,6 +264,9 @@ static Relation open_relation_and_check_permission(VacuumStmt *vacstmt,
 												   char expected_relkind);
 static void vacuumStatement(VacuumStmt *vacstmt, List *relids);
 
+extern bool parseAndTransformAsGraph(ParseState *pstate, RangeVar *rangeVar);
+
+extern List *transformAsGraphName(ParseState *pstate, RangeVar *rangeVar);
 
 /****************************************************************************
  *																			*
@@ -288,6 +291,23 @@ void vacuum(VacuumStmt *vacstmt, List *relids, int preferred_seg_num)
 	
 	Assert(!(vacstmt != NULL && relids != NULL));
 	
+	List *labellist = transformAsGraphName(NULL, vacstmt->relation);
+	if(labellist) {
+	  ListCell *cell;
+	  foreach(cell, labellist) {
+	    RangeVar *labelrel = (RangeVar*)lfirst(cell);
+	    VacuumStmt *refactoredStmt = copyObject(vacstmt);
+	    refactoredStmt->relation = labelrel;
+	    vacuum(refactoredStmt, NIL, -1);
+	  }
+	  return;
+	}
+	else {
+	  parseAndTransformAsGraph(NULL, vacstmt->relation);
+	  analyzeStmt->relation = vacstmt->relation;
+	}
+
+
 	if (doVacuum)
 	{
 		if (vacstmt->rootonly)
diff --git a/src/backend/executor/execIndexscan.c b/src/backend/executor/execIndexscan.c
index 0e41c95..87b4453 100644
--- a/src/backend/executor/execIndexscan.c
+++ b/src/backend/executor/execIndexscan.c
@@ -24,6 +24,7 @@
  */
 #include "postgres.h"
 
+#include "catalog/pg_exttable.h"
 #include "access/genam.h"
 #include "access/nbtree.h"
 #include "executor/execIndexscan.h"
@@ -162,6 +163,9 @@ OpenIndexRelation(EState *estate, Oid indexOid, Index tableRtIndex)
  *
  * Caller may pass NULL for arrayKeys and numArrayKeys to indicate that
  * ScalarArrayOpExpr quals are not supported.
+ *
+ * TODO(hwy): This code is used for both external scan and index scan, so it
+ * should be placed in a common file.
  */
 
 void
@@ -232,6 +236,9 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
 		Expr	   *leftop;		/* expr on lhs of operator */
 		Expr	   *rightop;	/* expr on rhs ... */
 		AttrNumber	varattno;	/* att number used in scan */
+		AttrNumber      varattnoold;
+		Oid             out_func_id = InvalidOid;
+		bool            typ_is_var_lena = false;
 
 		/*
 		 * extract clause information from the qualification
@@ -243,6 +250,10 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
 		subtype = lfirst_oid(subtype_cell);
 		subtype_cell = lnext(subtype_cell);
 
+		/*
+		 * So far, for a runtime key's expression, magma only supports OpExpr,
+		 * and the clause must be of the form (Var eq Var).
+		 */
 		if (IsA(clause, OpExpr))
 		{
 			/* indexkey op const or indexkey op expression */
@@ -266,6 +277,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
 				insist_log(false,"indexqual doesn't have key on left side");
 
 			varattno = ((Var *) leftop)->varattno;
+			varattnoold = ((Var *) leftop)->varoattno;
 
 			/*
 			 * rightop is the constant or variable comparison value
@@ -286,6 +298,16 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
 			}
 			else
 			{
+			        /* Only magma needs this logic. */
+			        if (IsA(planstate, ExternalScanState)
+			                && RelationIsMagmaTable2(((ExternalScanState *)planstate)->ss.ss_currentRelation->rd_id)
+			                /* We only need to process (Var op Var) here. */
+			                && IsA(rightop, Var))
+			        {
+			                Assert(((Var *) leftop)->vartype == ((Var *) rightop)->vartype);
+			                getTypeOutputInfo(((Var *) rightop)->vartype, &out_func_id, &typ_is_var_lena);
+			        }
+
 				/* Need to treat this one as a runtime key */
 				runtime_keys[n_runtime_keys].scan_key = this_scan_key;
 				runtime_keys[n_runtime_keys].key_expr =
@@ -303,7 +325,9 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
 								   strategy,	/* op's strategy */
 								   subtype,		/* strategy subtype */
 								   opfuncid,	/* reg proc to use */
-								   scanvalue);	/* constant */
+								   scanvalue,   /* constant */
+								   varattnoold,
+								   out_func_id);
 		}
 		else if (IsA(clause, RowCompareExpr))
 		{
@@ -399,7 +423,9 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
 									   op_strategy,		/* op's strategy */
 									   op_subtype,		/* strategy subtype */
 									   opfuncid,		/* reg proc to use */
-									   scanvalue);		/* constant */
+									   scanvalue,		/* constant */
+									   InvalidAttrNumber,
+				                                           InvalidOid);
 				extra_scan_keys++;
 			}
 
@@ -466,7 +492,9 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
 								   strategy,	/* op's strategy */
 								   subtype,		/* strategy subtype */
 								   opfuncid,	/* reg proc to use */
-								   (Datum) 0);	/* constant */
+								   (Datum) 0,	/* constant */
+								   InvalidAttrNumber,
+								   InvalidOid);
 		}
 		else
 			insist_log(false, "unsupported indexqual type: %d",
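
For magma tables, the hunk above records the right-hand Var's type output function (looked up once via getTypeOutputInfo()) together with the runtime scan key, presumably so the comparison value produced per outer tuple can later be rendered in text form for the pushed-down predicate. A standalone sketch of that idea follows; RuntimeKeyModel and int8_out are illustrative stand-ins, not backend symbols.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Per-type "datum to text" converter, looked up once at key-build time. */
    typedef void (*out_func)(char *buf, size_t len, int64_t datum);

    static void int8_out(char *buf, size_t len, int64_t datum)
    {
        snprintf(buf, len, "%lld", (long long) datum);
    }

    typedef struct {
        int      attno;   /* column the key applies to */
        out_func out;     /* recorded when the key is built */
    } RuntimeKeyModel;

    int main(void)
    {
        RuntimeKeyModel key = { 3, int8_out };   /* built once, like in ExecIndexBuildScanKeys */

        /* per-outer-tuple evaluation: the value only becomes known at run time */
        int64_t outer_values[] = { 7, 42 };
        char    buf[32];
        for (int i = 0; i < 2; i++) {
            key.out(buf, sizeof(buf), outer_values[i]);
            printf("pushdown predicate: col%d = %s\n", key.attno, buf);
        }
        return 0;
    }

In the patch, the extra (varattnoold, out_func_id) arguments flow into ScanKeyEntryInitialize(); callers that do not need them pass InvalidAttrNumber and InvalidOid.
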
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 5de2981..12de33a 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -972,18 +972,10 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
                 }
                 CommonPlanContext ctx;
                 bool newPlanner = can_convert_common_plan(queryDesc, &ctx);
-                if (newPlanner && ctx.enforceNewScheduler && scheduler_plan_support_check(queryDesc)) {
-                  const char *queryId = palloc0(sizeof(int) + sizeof(int) + 5);
-                  sprintf(queryId, "QID%d_%d", gp_session_id, gp_command_count);
-                  scheduler_prepare_for_new_query(queryDesc, queryId, 0);
-                  pfree(queryId);
-                  scheduler_run(queryDesc->estate->scheduler_data, &ctx);
-                } else {
-                  estate->mainDispatchData = mainDispatchInit(queryDesc->resource);
-                  estate->dispatch_data = NULL;
-                  mainDispatchPrepare(estate->mainDispatchData, queryDesc, newPlanner);
-                  mainDispatchRun(estate->mainDispatchData, &ctx, newPlanner);
-                }
+                estate->mainDispatchData = mainDispatchInit(queryDesc->resource);
+                estate->dispatch_data = NULL;
+                mainDispatchPrepare(estate->mainDispatchData, queryDesc, newPlanner);
+                mainDispatchRun(estate->mainDispatchData, &ctx, newPlanner);
 
                 DropQueryContextInfo(queryDesc->plannedstmt->contextdisp);
                 queryDesc->plannedstmt->contextdisp = NULL;
@@ -1291,7 +1283,8 @@ ExecutorRun(QueryDesc *queryDesc,
         } while (!readCacheEof());
         result = NULL;
       } else if (queryDesc->newPlan) {
-        exec_mpp_query_new(queryDesc->newPlan->str,
+        exec_mpp_query_new(queryDesc->estate->mainDispatchData,
+                           queryDesc->newPlan->str,
                            queryDesc->newPlan->len, currentSliceId,
                            false, dest, queryDesc->planstate);
         result = NULL;
@@ -1867,6 +1860,13 @@ InitializeResultRelations(PlannedStmt *plannedstmt, EState *estate, CmdType oper
       estate->es_result_aosegnos = NIL;
 
       if (get_rel_relstorage(relid) == RELSTORAGE_ORC &&
+          !(eflags & EXEC_FLAG_EXPLAIN_ONLY) && rel_has_index(relid)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("cannot UPDATE/DELETE on table \"%s\" because the table has indexes",
+                   get_rel_name(relid))));
+          }
+
+      if (get_rel_relstorage(relid) == RELSTORAGE_ORC &&
           !(eflags & EXEC_FLAG_EXPLAIN_ONLY)) {
         ListCell *cell;
         foreach (cell, plannedstmt->resultRelations) {
@@ -1882,6 +1882,12 @@ InitializeResultRelations(PlannedStmt *plannedstmt, EState *estate, CmdType oper
         }
       }
     } else {
+      if (get_rel_relstorage(relid) == RELSTORAGE_ORC &&
+          !(eflags & EXEC_FLAG_EXPLAIN_ONLY) && rel_has_index(relid)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("cannot INSERT on table \"%s\" because the table has indexes",
+                   get_rel_name(relid))));
+          }
       List  *all_relids = NIL;
       all_relids = lappend_oid(all_relids, relid);
       if (rel_is_partitioned(relid))
@@ -2039,7 +2045,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 	 */
 	if (Gp_role != GP_ROLE_EXECUTE)
 	{
-		ExecCheckRTPerms(plannedstmt->rtable);
+		  ExecCheckRTPerms(plannedstmt->rtable);
 	}
 
 	/*
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 87d8c17..6c05918 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -86,7 +86,6 @@
 #include "cdb/cdbmotion.h"
 #include "cdb/cdbsreh.h"
 #include "cdb/memquota.h"
-#include "cdb/scheduler.h"
 #include "cdb/cdbsrlz.h"
 #include "catalog/catalog.h" // isMasterOnly()
 #include "executor/spi.h"
@@ -331,7 +330,6 @@ InternalCreateExecutorState(MemoryContext qcontext, bool is_subquery)
 
 	estate->dispatch_data = NULL;
 	estate->mainDispatchData = NULL;
-	estate->scheduler_data = NULL;
 
 	estate->currentSliceIdInPlan = 0;
 	estate->currentExecutingSliceId = 0;
@@ -2196,6 +2194,8 @@ void mppExecutorFinishup(QueryDesc *queryDesc)
 		 */
 		TeardownInterconnect(estate->interconnect_context, estate->motionlayer_context, estate->cancelUnfinished);
 		estate->es_interconnect_is_setup = false;
+	} else if (queryDesc->newPlan) {
+	  teardownNewInterconnect();
 	}
 }
 
@@ -2259,12 +2259,7 @@ void mppExecutorCleanup(QueryDesc *queryDesc)
 		dispatch_catch_error(estate->dispatch_data);
 		estate->dispatch_data = NULL;
 	}
-	else if (estate->scheduler_data)
-	{
-	  scheduler_catch_error(estate->scheduler_data);
-	  scheduler_cleanup(estate->scheduler_data);
-	  estate->scheduler_data = NULL;
-	} else if (estate->mainDispatchData) {
+	else if (estate->mainDispatchData) {
 	  if (estate->es_interconnect_is_setup && !estate->es_got_eos)
 	    ExecSquelchNode(queryDesc->planstate);
 	  mainDispatchCatchError(&estate->mainDispatchData);
@@ -2275,6 +2270,8 @@ void mppExecutorCleanup(QueryDesc *queryDesc)
 	{
 		TeardownInterconnect(estate->interconnect_context, estate->motionlayer_context, true /* force EOS */);
 		estate->es_interconnect_is_setup = false;
+	} else if (queryDesc->newPlan) {
+	  teardownNewInterconnect();
 	}
 	
 	/**
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 8443877..452ca5b 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -539,13 +539,15 @@ postquel_getnext(execution_state *es, SQLFunctionCachePtr fcache)
 				if (count == 0L) {
 					result = NULL;
 					exec_mpp_query_new(
+					    es->qd->estate->mainDispatchData,
 							es->qd->newPlan->str, es->qd->newPlan->len,
 							currentSliceId, false, NULL, NULL);
 				} else {
 					es->qd->newExecutorState =
 							makeMyNewExecutorTupState(es->qd->tupDesc);
 
-					beginMyNewExecutor(es->qd->newPlan->str,
+					beginMyNewExecutor(es->qd->estate->mainDispatchData,
+					                   es->qd->newPlan->str,
 														 es->qd->newPlan->len,
 														 currentSliceId, NULL);
 					execMyNewExecutor(es->qd->newExecutorState);
diff --git a/src/backend/executor/newExecutor.c b/src/backend/executor/newExecutor.c
index 706dc8a..6a5825d 100644
--- a/src/backend/executor/newExecutor.c
+++ b/src/backend/executor/newExecutor.c
@@ -55,7 +55,7 @@ void checkOushuDbExtensiveFunctionSupport(char functionString[]) {
 
 PlanState *newExecutorPlanStateReference = NULL;
 
-void exec_mpp_query_new(const char *plan, int len, int stageNo, bool setDisplay,
+void exec_mpp_query_new(void *dispatchData, const char *plan, int len, int stageNo, bool setDisplay,
                         DestReceiver *dest, PlanState *planstate) {
   checkOushuDbExtensiveFeatureSupport("New Executor");
   Assert(MyNewExecutor != NULL);
@@ -65,7 +65,7 @@ void exec_mpp_query_new(const char *plan, int len, int stageNo, bool setDisplay,
   sprintf(queryId, "QID%d_%d", gp_session_id, gp_command_count);
   int vsegNum = GetQEGangNum();
   int rangeNum = 0;
-  ExecutorNewWorkHorse(MyNewExecutor, plan, len, queryId, stageNo, GetQEIndex(),
+  ExecutorNewWorkHorse(MyNewExecutor, dispatchData, plan, len, queryId, stageNo, GetQEIndex(),
                        vsegNum, DateStyle, DateOrder, &rangeNum);
   MyExecutorSetJumpHashMap(MyNewExecutor, get_jump_hash_map(rangeNum),
                            JUMP_HASH_MAP_LENGTH);
@@ -97,6 +97,10 @@ void exec_mpp_query_new(const char *plan, int len, int stageNo, bool setDisplay,
   newExecutorPlanStateReference = NULL;
 }
 
+void teardownNewInterconnect() {
+  ExecutorTearDownInterconnect(MyNewExecutor);
+}
+
 MyNewExecutorTupState *makeMyNewExecutorTupState(TupleDesc tupdesc) {
   MyNewExecutorTupState *state =
       (MyNewExecutorTupState *)palloc(sizeof(MyNewExecutorTupState));
@@ -142,7 +146,7 @@ MyNewExecutorTupState *makeMyNewExecutorTupState(TupleDesc tupdesc) {
   return state;
 }
 
-void beginMyNewExecutor(const char *plan, int len, int stageNo,
+void beginMyNewExecutor(void *dispatchData, const char *plan, int len, int stageNo,
                         PlanState *planstate) {
   Assert(MyNewExecutor != NULL);
   newExecutorPlanStateReference = planstate;
@@ -151,7 +155,7 @@ void beginMyNewExecutor(const char *plan, int len, int stageNo,
   sprintf(queryId, "QID%d_%d", gp_session_id, gp_command_count);
   int vsegNum = GetQEGangNum();
   int rangeNum = 0;
-  ExecutorNewWorkHorse(MyNewExecutor, plan, len, queryId, stageNo, GetQEIndex(),
+  ExecutorNewWorkHorse(MyNewExecutor, dispatchData, plan, len, queryId, stageNo, GetQEIndex(),
                        vsegNum, DateStyle, DateOrder, &rangeNum);
   MyExecutorSetJumpHashMap(MyNewExecutor, get_jump_hash_map(rangeNum),
                            JUMP_HASH_MAP_LENGTH);
diff --git a/src/backend/executor/nodeExternalscan.c b/src/backend/executor/nodeExternalscan.c
index 2d203a7..0b5f423 100644
--- a/src/backend/executor/nodeExternalscan.c
+++ b/src/backend/executor/nodeExternalscan.c
@@ -36,6 +36,7 @@
 #include "postgres.h"
 #include "fmgr.h"
 
+#include "access/genam.h"
 #include "access/fileam.h"
 #include "access/filesplit.h"
 #include "access/heapam.h"
@@ -44,6 +45,8 @@
 #include "cdb/cdbvars.h"
 #include "cdb/cdbdatalocality.h"
 #include "executor/execdebug.h"
+#include "executor/execIndexscan.h"
+#include "executor/nodeIndexscan.h"
 #include "executor/nodeExternalscan.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
@@ -66,7 +69,6 @@ static TupleTableSlot *
 ExternalNext(ExternalScanState *node)
 {
 	FileScanDesc scandesc;
-	Index scanrelid;
 	EState *estate = NULL;
 	ScanDirection direction;
 	TupleTableSlot *slot = NULL;
@@ -78,7 +80,6 @@ ExternalNext(ExternalScanState *node)
 	 */
 	estate = node->ss.ps.state;
 	scandesc = node->ess_ScanDesc;
-	scanrelid = ((ExternalScan *) node->ss.ps.plan)->scan.scanrelid;
 	direction = estate->es_direction;
 	slot = node->ss.ss_ScanTupleSlot;
 
@@ -368,6 +369,53 @@ ExecInitExternalScan(ExternalScan *node, EState *estate, int eflags)
 
 	ExecAssignScanType(&externalstate->ss, RelationGetDescr(currentRelation));
 
+	if (IsA(node, MagmaIndexScan) || IsA(node, MagmaIndexOnlyScan))
+	{
+	        /*
+	         * Unlike ExecInitIndexScan() (which is used for heap tables and runs only on the QD),
+	         * we cannot call index_open() to obtain iss_RelationDesc, because this code also runs
+	         * on the QEs and the index is not dispatched to them, so iss_RelationDesc is set to NULL.
+	         *
+	         * The code paths that require iss_RelationDesc have been disabled, so this is safe.
+	         * See also: best_inner_indexscan().
+	         */
+                externalstate->iss_RelationDesc = NULL;
+
+                /*
+                 * build the index scan keys from the index qualification
+                 */
+                ExecIndexBuildScanKeys((PlanState *) externalstate,
+                                       externalstate->iss_RelationDesc,
+                                       node->indexqual,
+                                       node->indexstrategy,
+                                       node->indexsubtype,
+                                       &externalstate->iss_ScanKeys,
+                                       &externalstate->iss_NumScanKeys,
+                                       &externalstate->iss_RuntimeKeys,
+                                       &externalstate->iss_NumRuntimeKeys,
+                                       NULL,        /* no ArrayKeys */
+                                       NULL);
+
+                /*
+                 * If we have runtime keys, we need an ExprContext to evaluate them. The
+                 * node's standard context won't do because we want to reset that context
+                 * for every tuple.  So, build another context just like the other one...
+                 * -tgl 7/11/00
+                 */
+                if (externalstate->iss_NumRuntimeKeys != 0)
+                {
+                	ExprContext *stdecontext = externalstate->ss.ps.ps_ExprContext;
+
+                	ExecAssignExprContext(estate, &externalstate->ss.ps);
+                	externalstate->iss_RuntimeContext = externalstate->ss.ps.ps_ExprContext;
+                	externalstate->ss.ps.ps_ExprContext = stdecontext;
+                }
+                else
+                {
+                	externalstate->iss_RuntimeContext = NULL;
+                }
+	}
+
 	/*
 	 * Initialize result tuple type and projection info.
 	 */
@@ -497,11 +545,48 @@ void
 ExecExternalReScan(ExternalScanState *node, ExprContext *exprCtxt)
 {
 	EState	   *estate;
+	ExprContext *econtext;
 	Index		scanrelid;
 	FileScanDesc fileScan;
+	TupleTableSlot *slot = NULL;
 
 	estate = node->ss.ps.state;
+	econtext = node->iss_RuntimeContext;		/* context for runtime keys */
 	scanrelid = ((SeqScan *) node->ss.ps.plan)->scanrelid;
+	slot = node->ss.ss_ScanTupleSlot;
+
+	if (econtext)
+	{
+		/*
+		 * If we are being passed an outer tuple, save it for runtime key
+		 * calc.  We also need to link it into the "regular" per-tuple
+		 * econtext, so it can be used during indexqualorig evaluations.
+		 */
+		if (exprCtxt != NULL)
+		{
+			ExprContext *stdecontext;
+
+			econtext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
+			stdecontext = node->ss.ps.ps_ExprContext;
+			stdecontext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
+		}
+
+		/*
+		 * Reset the runtime-key context so we don't leak memory as each outer
+		 * tuple is scanned.  Note this assumes that we will recalculate *all*
+		 * runtime keys on each call.
+		 */
+		ResetExprContext(econtext);
+	}
+
+	/*
+	 * If we are doing runtime key calculations (ie, the index keys depend on
+	 * data from an outer scan), compute the new key values
+	 */
+	if (node->iss_NumRuntimeKeys != 0)
+		ExecIndexEvalRuntimeKeys(econtext,
+								 node->iss_RuntimeKeys,
+								 node->iss_NumRuntimeKeys);
 
 	/* If this is re-scanning of PlanQual ... */
 	if (estate->es_evTuple != NULL &&
@@ -529,16 +614,13 @@ ExecExternalReScan(ExternalScanState *node, ExprContext *exprCtxt)
 		Assert(fileScan->fs_formatter_name);
 
 		FmgrInfo *rescanFunc = fileScan->fs_ps_scan_funcs.rescan;
+		if (!rescanFunc)
+			elog(ERROR, "%s_rescan function was not found",
+			     fileScan->fs_formatter_name);
 
-		if (rescanFunc)
-		{
-			InvokePlugStorageFormatReScan(rescanFunc, fileScan);
-		}
-		else
-		{
-			elog(ERROR, "%s_rescan function was not found",
-			            fileScan->fs_formatter_name);
-		}
+		InvokePlugStorageFormatReScan(rescanFunc, fileScan, &(node->ss),
+		    PlugStorageGetTransactionSnapshot(NULL),
+		    node->iss_RuntimeKeys, node->iss_NumRuntimeKeys, slot);
 	}
 }
 
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index beca1dc..98fcfe0 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -24,16 +24,20 @@
  */
 #include "postgres.h"
 
+#include "access/filesplit.h"
 #include "access/genam.h"
 #include "access/nbtree.h"
+#include "access/orcam.h"
 #include "cdb/cdbvars.h"
 #include "executor/execdebug.h"
 #include "executor/nodeIndexscan.h"
 #include "nodes/nodeFuncs.h"
+#include "nodes/nodes.h"
 #include "optimizer/clauses.h"
 #include "utils/array.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
+#include "utils/debugutils.h"
 
 /*
  * Initialize the index scan descriptor if it is not initialized.
@@ -70,6 +74,22 @@ freeScanDesc(IndexScanState *indexstate)
 	}
 }
 
+/* native orc index */
+extern TupleTableSlot *OrcIndexNext(IndexScanState *node)
+{
+	if (node->scandesc == NULL)
+		orcBeginIndexOnlyScan(node, ((IndexScan*)(node->ss.ps.plan))->indexid,
+													((IndexScan*)(node->ss.ps.plan))->idxColummns);
+	TupleTableSlot *slot = orcIndexOnlyScanNext(node);
+
+	if (!TupIsNull(slot))
+	{
+		Gpmon_M_Incr_Rows_Out(GpmonPktFromIndexScanState(node));
+		CheckSendPlanStateGpmonPkt(&node->ss.ps);
+	}
+
+	return slot;
+}
 
 /* ----------------------------------------------------------------
  *		IndexNext
@@ -202,7 +222,10 @@ ExecIndexScan(IndexScanState *node)
 	/*
 	 * use IndexNext as access method
 	 */
-	return ExecScan(&node->ss, (ExecScanAccessMtd) IndexNext);
+	if (!(IsA(node->ss.ps.plan, OrcIndexOnlyScan) || IsA(node->ss.ps.plan, OrcIndexScan)))
+		return ExecScan(&node->ss, (ExecScanAccessMtd) IndexNext);
+	else
+		return ExecScan(&node->ss, (ExecScanAccessMtd) OrcIndexNext);
 }
 
 /* ----------------------------------------------------------------
@@ -222,7 +245,11 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
 	ExprContext *econtext;
 	Index		scanrelid;
 
-	initScanDesc(node);
+	if (!(IsA(node->ss.ps.plan, OrcIndexOnlyScan) || IsA(node->ss.ps.plan, OrcIndexScan)))
+		initScanDesc(node);
+	else
+		orcBeginIndexOnlyScan(node, ((IndexScan*)(node->ss.ps.plan))->indexid,
+													((IndexScan*)(node->ss.ps.plan))->idxColummns);
 
 	estate = node->ss.ps.state;
 	econtext = node->iss_RuntimeContext;		/* context for runtime keys */
@@ -273,7 +300,10 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
 	}
 
 	/* reset index scan */
-	index_rescan(node->iss_ScanDesc, node->iss_ScanKeys);
+	if (!(IsA(node->ss.ps.plan, OrcIndexOnlyScan) || IsA(node->ss.ps.plan, OrcIndexScan)))
+		index_rescan(node->iss_ScanDesc, node->iss_ScanKeys);
+	else
+		orcIndexOnlyReScan(node);
 
 	Gpmon_M_Incr(GpmonPktFromIndexScanState(node), GPMON_INDEXSCAN_RESCAN); 
 	CheckSendPlanStateGpmonPkt(&node->ss.ps);
@@ -481,7 +511,10 @@ ExecEndIndexScan(IndexScanState *node)
 	 * close the index relation
 	 */
 	ExecEagerFreeIndexScan(node);
-	index_close(indexRelationDesc, NoLock);
+	if (!(IsA(node->ss.ps.plan, OrcIndexOnlyScan) || IsA(node->ss.ps.plan, OrcIndexScan)))
+		index_close(indexRelationDesc, NoLock);
+	else
+		orcEndIndexOnlyScan(node);
 
 	/*
 	 * close the heap relation.
@@ -594,8 +627,13 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
 	 * taking another lock here.  Otherwise we need a normal reader's lock.
 	 */
 	relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
-	indexstate->iss_RelationDesc = index_open(node->indexid,
-									 relistarget ? NoLock : AccessShareLock);
+	if (!(IsA(&(node->scan.plan), OrcIndexOnlyScan) || IsA(&(node->scan.plan), OrcIndexScan)))
+		indexstate->iss_RelationDesc = index_open(node->indexid,relistarget ? NoLock : AccessShareLock);
+	else
+	{
+		indexstate->ss.splits = GetFileSplitsOfSegment(estate->es_plannedstmt->scantable_splits,
+																									 currentRelation->rd_id, GetQEIndex());
+	}
 
 	/*
 	 * build the index scan keys from the index qualification
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index 3992c3a..b5be790 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -152,13 +152,13 @@ ExecNestLoop(NestLoopState *node)
 			 * iterations.
 			 */
 			node->nl_innerSideScanned = true;
-            /* CDB: Quit if empty inner implies no outer rows can match. */
+			/* CDB: Quit if empty inner implies no outer rows can match. */
 			/* See MPP-1146 and MPP-1694 */
 			if (node->nl_QuitIfEmptyInner)
-            {
-                ExecSquelchNode(outerPlan);
-                return NULL;
-            }
+			{
+			        ExecSquelchNode(outerPlan);
+			        return NULL;
+			}
 		}
 
 		if ((node->js.jointype == JOIN_LASJ_NOTIN) &&
@@ -200,10 +200,10 @@ ExecNestLoop(NestLoopState *node)
 		 */
 		if (node->nl_NeedNewOuter)
 		{
-            ENL1_printf("getting new outer tuple");
-            outerTupleSlot = ExecProcNode(outerPlan);
-            Gpmon_M_Incr(GpmonPktFromNLJState(node), GPMON_NLJ_OUTERTUPLE);
-            Gpmon_M_Incr(GpmonPktFromNLJState(node), GPMON_QEXEC_M_ROWSIN); 
+		        ENL1_printf("getting new outer tuple");
+		        outerTupleSlot = ExecProcNode(outerPlan);
+		        Gpmon_M_Incr(GpmonPktFromNLJState(node), GPMON_NLJ_OUTERTUPLE);
+		        Gpmon_M_Incr(GpmonPktFromNLJState(node), GPMON_QEXEC_M_ROWSIN);
 
 			/*
 			 * if there are no more outer tuples, then the join is complete..
@@ -308,18 +308,18 @@ ExecNestLoop(NestLoopState *node)
 					 */
 					ENL1_printf("qualification succeeded, projecting tuple");
 
-					Gpmon_M_Incr_Rows_Out(GpmonPktFromNLJState(node)); 
-                          	CheckSendPlanStateGpmonPkt(&node->js.ps);
+					Gpmon_M_Incr_Rows_Out(GpmonPktFromNLJState(node));
+					CheckSendPlanStateGpmonPkt(&node->js.ps);
 					return ExecProject(node->js.ps.ps_ProjInfo, NULL);
 				}
 			}
 
-            /* CDB: Quit if empty inner implies no outer rows can match. */
-            if (node->nl_QuitIfEmptyInner)
-            {
-                ExecSquelchNode(outerPlan);
-                return NULL;
-            }
+			/* CDB: Quit if empty inner implies no outer rows can match. */
+			if (node->nl_QuitIfEmptyInner)
+			{
+			        ExecSquelchNode(outerPlan);
+			        return NULL;
+			}
 
 			/*
 			 * Otherwise just return to top of loop for a new outer tuple.
@@ -327,7 +327,7 @@ ExecNestLoop(NestLoopState *node)
 			continue;
 		}
 
-        node->nl_QuitIfEmptyInner = false;  /*CDB*/
+		node->nl_QuitIfEmptyInner = false;  /*CDB*/
 
 		if ((node->js.jointype == JOIN_LASJ_NOTIN) &&
 				(!node->nl_innerSideScanned) &&
@@ -376,7 +376,7 @@ ExecNestLoop(NestLoopState *node)
 				ENL1_printf("qualification succeeded, projecting tuple");
 
 				Gpmon_M_Incr_Rows_Out(GpmonPktFromNLJState(node));
-                     	CheckSendPlanStateGpmonPkt(&node->js.ps);
+				CheckSendPlanStateGpmonPkt(&node->js.ps);
 				return ExecProject(node->js.ps.ps_ProjInfo, NULL);
 			}
 
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 81c6792..92aa892 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -1129,18 +1129,10 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext, QueryDesc *gbl_query
 			 */
 		  CommonPlanContext ctx;
 		  bool newPlanner = can_convert_common_plan(queryDesc, &ctx);
-       if (newPlanner && ctx.enforceNewScheduler && scheduler_plan_support_check(queryDesc)) {
-		   	const char *queryId = palloc0(sizeof(int) + sizeof(int) + 5);
-		   	sprintf(queryId, "QID%d_%d", gp_session_id, gp_command_count);
-		   	scheduler_prepare_for_new_query(queryDesc, queryId, subplan->plan_id);
-		   	pfree(queryId);
-		   	scheduler_run(queryDesc->estate->scheduler_data, &ctx);
-		  } else {
-		    queryDesc->estate->mainDispatchData = mainDispatchInit(queryDesc->resource);
-		    queryDesc->estate->dispatch_data = NULL;
-		    mainDispatchPrepare(queryDesc->estate->mainDispatchData, queryDesc, newPlanner);
-		    mainDispatchRun(queryDesc->estate->mainDispatchData, &ctx, newPlanner);
-		  }
+      queryDesc->estate->mainDispatchData = mainDispatchInit(queryDesc->resource);
+      queryDesc->estate->dispatch_data = NULL;
+      mainDispatchPrepare(queryDesc->estate->mainDispatchData, queryDesc, newPlanner);
+      mainDispatchRun(queryDesc->estate->mainDispatchData, &ctx, newPlanner);
 
 			// decide if the query is supported by new executor
 			if (queryDesc && queryDesc->newPlan)
@@ -1179,7 +1171,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext, QueryDesc *gbl_query
 		  Plan *plan = exec_subplan_get_plan(gbl_queryDesc->plannedstmt, subplan);
 		  TupleDesc cleanTupDesc = ExecCleanTypeFromTL(plan->targetlist, false);
 		  queryDesc->newExecutorState = makeMyNewExecutorTupState(cleanTupDesc);
-		  beginMyNewExecutor(queryDesc->newPlan->str, queryDesc->newPlan->len,
+		  beginMyNewExecutor(queryDesc->estate->mainDispatchData,
+		                     queryDesc->newPlan->str, queryDesc->newPlan->len,
 		                     subplan->qDispSliceId, planstate);
 		}
 
@@ -1360,20 +1353,6 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext, QueryDesc *gbl_query
              * report it and exit to our error handler (below) via PG_THROW.
              */
             mainDispatchCleanUp(&queryDesc->estate->mainDispatchData);
-        } else if (shouldDispatch &&
-            queryDesc && queryDesc->estate &&
-            queryDesc->estate->scheduler_data) {
-          scheduler_wait(queryDesc->estate->scheduler_data);
-          queryDesc->estate->scheduler_data = NULL;
-          if (planstate->instrument && queryDesc->estate->scheduler_data &&
-              queryDesc->estate->scheduler_data->state != SS_ERROR) {
-            scheduler_receive_computenode_stats(
-                queryDesc->estate->scheduler_data, planstate);
-            cdbexplain_recvSchedulerExecStats(
-                planstate, queryDesc->estate->scheduler_data, 0,
-                econtext->ecxt_estate->showstatctx);
-          }
-          scheduler_cleanup(queryDesc->estate->scheduler_data);
         }
 
 		/* teardown the sequence server */
@@ -1387,6 +1366,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext, QueryDesc *gbl_query
 	        TeardownInterconnect(queryDesc->estate->interconnect_context, 
 								 queryDesc->estate->motionlayer_context,
 								 false); /* following success on QD */	
+		} else if (newExecutor) {
+		  teardownNewInterconnect();
 		}
 
     }
@@ -1410,12 +1391,6 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext, QueryDesc *gbl_query
                   econtext->ecxt_estate->showstatctx,
                   mainDispatchGetSegNum(queryDesc->estate->mainDispatchData));
             }
-          } else if (planstate->state->scheduler_data){
-            scheduler_receive_computenode_stats(
-                queryDesc->estate->scheduler_data, planstate);
-            cdbexplain_recvSchedulerExecStats(
-                planstate, queryDesc->estate->scheduler_data, 0,
-                econtext->ecxt_estate->showstatctx);
           }
         }
 
@@ -1429,24 +1404,23 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext, QueryDesc *gbl_query
 		 */
         if (shouldDispatch && queryDesc && queryDesc->estate && queryDesc->estate->mainDispatchData) {
           mainDispatchCatchError(&queryDesc->estate->mainDispatchData);
-        } else if (shouldDispatch && queryDesc && queryDesc->estate && queryDesc->estate->scheduler_data) {
-          scheduler_catch_error(queryDesc->estate->scheduler_data);
-          scheduler_cleanup(queryDesc->estate->scheduler_data);
-          queryDesc->estate->scheduler_data = NULL;
         }
 		
 		/* teardown the sequence server */
 		TeardownSequenceServer();
-		
-        /*
-         * Clean up the interconnect.
-         * CDB TODO: Is this needed following failure on QD?
-         */
-        if (shouldTeardownInterconnect)
-			TeardownInterconnect(queryDesc->estate->interconnect_context,
-								 queryDesc->estate->motionlayer_context,
-								 true);
-		PG_RE_THROW();
+
+    /*
+     * Clean up the interconnect.
+     * CDB TODO: Is this needed following failure on QD?
+     */
+    if (shouldTeardownInterconnect)
+      TeardownInterconnect(queryDesc->estate->interconnect_context,
+                           queryDesc->estate->motionlayer_context,
+                           true);
+    else if (newExecutor) {
+      teardownNewInterconnect();
+    }
+    PG_RE_THROW();
 	}
 	PG_END_TRY();
 
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 03cdda7..5190670 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -775,8 +775,6 @@ void Explain_udf_plan(QueryDesc *qdesc)
 
 	if (qdesc->estate->dispatch_data)
 		dispatcher_print_statistics(buf, qdesc->estate->dispatch_data);
-	else if (qdesc->estate->scheduler_data)
-		scheduler_print_stats(qdesc->estate->scheduler_data, buf);
 	appendStringInfo(buf, "Data locality statistics:\n");
 	if (qdesc->plannedstmt->datalocalityInfo ==NULL){
 		appendStringInfo(buf, "  no data locality information in this query\n");
@@ -2365,7 +2363,8 @@ _SPI_pquery(QueryDesc * queryDesc, bool fire_triggers, long tcount)
         {
           queryDesc->newExecutorState = makeMyNewExecutorTupState(
               queryDesc->tupDesc);
-          beginMyNewExecutor(queryDesc->newPlan->str,
+          beginMyNewExecutor(queryDesc->estate->mainDispatchData,
+                             queryDesc->newPlan->str,
                              queryDesc->newPlan->len, currentSliceId,
                              queryDesc->planstate);
           (*queryDesc->dest->rStartup)(queryDesc->dest,
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index f68767d..f7dde09 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -238,6 +238,7 @@ _copyPlannedStmt(PlannedStmt *from)
 	COPY_SCALAR_FIELD(planner_segments);
 
 	COPY_LOCATION_FIELD(originNodeType);
+	COPY_NODE_FIELD(graphEntry);
 
 	return newnode;
 }
@@ -543,6 +544,8 @@ copyIndexScanFields(const IndexScan *from, IndexScan *newnode)
 	{
 		Assert(newnode->logicalIndexInfo == NULL);
 	}
+	COPY_SCALAR_FIELD(indexonly);
+	COPY_NODE_FIELD(idxColummns);
 }
 
 /*
@@ -602,8 +605,11 @@ _copyMagmaIndexScan(MagmaIndexScan *from)
 	COPY_SCALAR_FIELD(rejLimitInRows);
 	COPY_SCALAR_FIELD(fmterrtbl);
 	COPY_STRING_FIELD(indexname);
-	COPY_SCALAR_FIELD(indexorderdir);
+	COPY_NODE_FIELD(indexqual);
 	COPY_NODE_FIELD(indexqualorig);
+	COPY_NODE_FIELD(indexstrategy);
+	COPY_NODE_FIELD(indexsubtype);
+	COPY_SCALAR_FIELD(indexorderdir);
 	COPY_NODE_FIELD(errAosegnos);
 	COPY_NODE_FIELD(err_aosegfileinfos);
 	COPY_SCALAR_FIELD(encoding);
@@ -629,7 +635,10 @@ _copyMagmaIndexOnlyScan(MagmaIndexOnlyScan *from)
 	COPY_SCALAR_FIELD(rejLimitInRows);
 	COPY_SCALAR_FIELD(fmterrtbl);
 	COPY_STRING_FIELD(indexname);
+	COPY_NODE_FIELD(indexqual);
 	COPY_NODE_FIELD(indexqualorig);
+	COPY_NODE_FIELD(indexstrategy);
+	COPY_NODE_FIELD(indexsubtype);
 	COPY_SCALAR_FIELD(indexorderdir);
 	COPY_NODE_FIELD(errAosegnos);
 	COPY_NODE_FIELD(err_aosegfileinfos);
@@ -2160,6 +2169,7 @@ _copyRangeTblEntry(RangeTblEntry *from)
 	COPY_SCALAR_FIELD(forceDistRandom);
     COPY_NODE_FIELD(pseudocols);                /*CDB*/
 
+	COPY_STRING_FIELD(graphName);
 	return newnode;
 }
 
@@ -2723,6 +2733,7 @@ _copyQuery(Query *from)
 	}
 	else
 		newnode->intoPolicy = NULL;
+	COPY_NODE_FIELD(graphEntry);
 
 	return newnode;
 }
@@ -3379,7 +3390,15 @@ _copyIndexStmt(IndexStmt *from)
 	COPY_SCALAR_FIELD(concurrent);
 	COPY_NODE_FIELD(idxOids);
 	COPY_SCALAR_FIELD(do_part);
-
+	COPY_SCALAR_FIELD(magma);
+	COPY_SCALAR_FIELD(relationOid);
+	COPY_NODE_FIELD(allidxinfos);
+	COPY_NODE_FIELD(columnsToRead);
+	COPY_NODE_FIELD(contextdisp);
+	COPY_NODE_FIELD(graphele);
+	COPY_NODE_FIELD(graphIndexAttnum);
+	COPY_NODE_FIELD(graphIncludeAttnum);
+	COPY_SCALAR_FIELD(reverse);
 	return newnode;
 }
 
@@ -4401,6 +4420,15 @@ _copyVirtualSegmentNode(const VirtualSegmentNode *from)
 	return newnode;
 }
 
+static GraphEntry *
+_copyGraphEntry(const GraphEntry *from)
+{
+  GraphEntry *newnode = makeNode(GraphEntry);
+  newnode->relid = from->relid;
+  newnode->requiredPerms = from->requiredPerms;
+  return newnode;
+}
+
 /* ****************************************************************
  *					pg_list.h copy functions
  * ****************************************************************
@@ -5314,6 +5342,9 @@ copyObject(void *from)
 		case T_VirtualSegmentNode:
 			retval = _copyVirtualSegmentNode(from);
 			break;
+		case T_GraphEntry:
+		  retval = _copyGraphEntry(from);
+		  break;
 		default:
 			elog(ERROR, "unrecognized node type: %d", (int) nodeTag(from));
 			retval = from;		/* keep compiler quiet */
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index f04bb81..17e521c 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1268,6 +1268,15 @@ _equalIndexStmt(IndexStmt *a, IndexStmt *b)
 	COMPARE_SCALAR_FIELD(concurrent);
 	COMPARE_NODE_FIELD(idxOids);
 	COMPARE_SCALAR_FIELD(do_part);
+	COMPARE_SCALAR_FIELD(magma);
+	COMPARE_SCALAR_FIELD(relationOid);
+	COMPARE_NODE_FIELD(allidxinfos);
+	COMPARE_NODE_FIELD(columnsToRead);
+	COMPARE_NODE_FIELD(contextdisp);
+	COMPARE_NODE_FIELD(graphele);
+	COMPARE_NODE_FIELD(graphIndexAttnum);
+	COMPARE_NODE_FIELD(graphIncludeAttnum);
+	COMPARE_SCALAR_FIELD(reverse);
 
 	return true;
 }
diff --git a/src/backend/nodes/outfast.c b/src/backend/nodes/outfast.c
index 1409f38..3b63e85 100644
--- a/src/backend/nodes/outfast.c
+++ b/src/backend/nodes/outfast.c
@@ -483,6 +483,7 @@ _outPlannedStmt(StringInfo str, PlannedStmt *node)
 
 	WRITE_STRING_FIELD(hiveUrl);
 	WRITE_ENUM_FIELD(originNodeType, NodeTag);
+	WRITE_NODE_FIELD(graphEntry);
 }
 
 static void
@@ -612,7 +613,10 @@ _outMagmaIndexScan(StringInfo str, MagmaIndexScan *node)
 	WRITE_NODE_TYPE("MAGMAINDEXSCAN");
 	_outScanInfo(str, (Scan *) node);
 	WRITE_STRING_FIELD(indexname);
+	WRITE_LIST_FIELD(indexqual);
 	WRITE_LIST_FIELD(indexqualorig);
+	WRITE_LIST_FIELD(indexstrategy);
+	WRITE_LIST_FIELD(indexsubtype);
 	WRITE_ENUM_FIELD(indexorderdir, ScanDirection);
 	if (print_variable_fields) {
 	WRITE_NODE_FIELD(uriList);
@@ -640,7 +644,10 @@ _outMagmaIndexOnlyScan(StringInfo str, MagmaIndexOnlyScan *node)
 	WRITE_BOOL_FIELD(rejLimitInRows);
 	WRITE_OID_FIELD(fmterrtbl);
 	WRITE_STRING_FIELD(indexname);
+	WRITE_LIST_FIELD(indexqual);
 	WRITE_LIST_FIELD(indexqualorig);
+	WRITE_LIST_FIELD(indexstrategy);
+	WRITE_LIST_FIELD(indexsubtype);
 	WRITE_ENUM_FIELD(indexorderdir, ScanDirection);
 	WRITE_NODE_FIELD(errAosegnos);
 	WRITE_NODE_FIELD(err_aosegfileinfos);
@@ -706,6 +713,8 @@ outIndexScanFields(StringInfo str, IndexScan *node)
 	{
 		Assert(node->logicalIndexInfo == NULL);
 	}
+	WRITE_BOOL_FIELD(indexonly);
+	WRITE_LIST_FIELD(idxColummns);
 }
 
 static void
@@ -2362,6 +2371,7 @@ _outIndexStmt(StringInfo str, IndexStmt *node)
 	WRITE_STRING_FIELD(accessMethod);
 	WRITE_STRING_FIELD(tableSpace);
 	WRITE_NODE_FIELD(indexParams);
+	WRITE_NODE_FIELD(indexIncludingParams);
 	WRITE_NODE_FIELD(options);
 
 	WRITE_NODE_FIELD(whereClause);
@@ -2374,10 +2384,16 @@ _outIndexStmt(StringInfo str, IndexStmt *node)
 	WRITE_OID_FIELD(constrOid);
 	WRITE_BOOL_FIELD(concurrent);
 	WRITE_NODE_FIELD(idxOids);
+	WRITE_BOOL_FIELD(do_part);
+	WRITE_BOOL_FIELD(magma);
 	WRITE_OID_FIELD(relationOid);
 	WRITE_NODE_FIELD(allidxinfos);
 	WRITE_NODE_FIELD(columnsToRead);
 	WRITE_NODE_FIELD(contextdisp);
+	WRITE_NODE_FIELD(graphele);
+	WRITE_NODE_FIELD(graphIndexAttnum);
+	WRITE_NODE_FIELD(graphIncludeAttnum);
+	WRITE_BOOL_FIELD(reverse);
 }
 
 static void
@@ -3303,6 +3319,7 @@ _outQuery(StringInfo str, Query *node)
 	WRITE_NODE_FIELD(result_aosegnos);
 	WRITE_NODE_FIELD(returningLists);
 	WRITE_NODE_FIELD(contextdisp);
+	WRITE_NODE_FIELD(graphEntry);
 	/* Don't serialize policy */
 }
 
@@ -3519,6 +3536,7 @@ _outRangeTblEntry(StringInfo str, RangeTblEntry *node)
 	WRITE_OID_FIELD(checkAsUser);
 
 	WRITE_BOOL_FIELD(forceDistRandom);
+	WRITE_STRING_FIELD(graphName);
 }
 
 static void
@@ -4011,6 +4029,14 @@ _outQueryResource(StringInfo str, QueryResource *node)
 	WRITE_INT64_FIELD(master_start_time);
 }
 
+static void
+_outGraphEntry(StringInfo str, GraphEntry *node)
+{
+  WRITE_NODE_TYPE("GraphEntry");
+  WRITE_OID_FIELD(relid);
+  WRITE_UINT_FIELD(requiredPerms);
+}
+
 /*
  * _outNode -
  *	  converts a Node into binary string and append it to 'str'
@@ -4868,6 +4894,10 @@ _outNode(StringInfo str, void *obj)
 				_outQueryResource(str, obj);
 				break;
 
+			case T_GraphEntry:
+        _outGraphEntry(str, obj);
+        break;
+
 			default:
 				elog(ERROR, "could not serialize unrecognized node type: %d",
 						 (int) nodeTag(obj));
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index c7ed4da..962b15c 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -663,6 +663,8 @@ outIndexScanFields(StringInfo str, IndexScan *node)
 	{
 		Assert(node->logicalIndexInfo == NULL);
 	}
+	WRITE_BOOL_FIELD(indexonly);
+	WRITE_NODE_FIELD(idxColummns);
 }
 
 static void
@@ -2292,10 +2294,16 @@ _outIndexStmt(StringInfo str, IndexStmt *node)
 	WRITE_OID_FIELD(constrOid);
 	WRITE_BOOL_FIELD(concurrent);
 	WRITE_NODE_FIELD(idxOids);
+	WRITE_BOOL_FIELD(do_part);
+	WRITE_BOOL_FIELD(magma);
 	WRITE_OID_FIELD(relationOid);
 	WRITE_NODE_FIELD(allidxinfos);
 	WRITE_NODE_FIELD(columnsToRead);
 	WRITE_NODE_FIELD(contextdisp);
+	WRITE_NODE_FIELD(graphele);
+	WRITE_NODE_FIELD(graphIndexAttnum);
+	WRITE_NODE_FIELD(graphIncludeAttnum);
+	WRITE_BOOL_FIELD(reverse);
 }
 
 static void
@@ -3711,6 +3719,7 @@ _outRangeTblEntry(StringInfo str, RangeTblEntry *node)
 
 	WRITE_BOOL_FIELD(forceDistRandom);
     WRITE_NODE_FIELD(pseudocols);                                       /*CDB*/
+	WRITE_STRING_FIELD(graphName);
 }
 
 static void
diff --git a/src/backend/nodes/readfast.c b/src/backend/nodes/readfast.c
index 3371440..8547682 100644
--- a/src/backend/nodes/readfast.c
+++ b/src/backend/nodes/readfast.c
@@ -295,6 +295,7 @@ _readQuery(const char ** str)
 	READ_NODE_FIELD(result_aosegnos);
 	READ_NODE_FIELD(returningLists);
 	READ_NODE_FIELD(contextdisp);
+	READ_NODE_FIELD(graphEntry);
 	/* policy not serialized */
 
 	READ_DONE();
@@ -765,6 +766,7 @@ _readIndexStmt(const char ** str)
 	READ_STRING_FIELD(accessMethod);
 	READ_STRING_FIELD(tableSpace);
 	READ_NODE_FIELD(indexParams);
+	READ_NODE_FIELD(indexIncludingParams);
 	READ_NODE_FIELD(options);
 	READ_NODE_FIELD(whereClause);
 	READ_NODE_FIELD(rangetable);
@@ -776,11 +778,16 @@ _readIndexStmt(const char ** str)
 	READ_OID_FIELD(constrOid);
 	READ_BOOL_FIELD(concurrent);
 	READ_NODE_FIELD(idxOids);
+	READ_BOOL_FIELD(do_part);
+	READ_BOOL_FIELD(magma);
 	READ_OID_FIELD(relationOid);
 	READ_NODE_FIELD(allidxinfos);
 	READ_NODE_FIELD(columnsToRead);
 	READ_NODE_FIELD(contextdisp);
-
+	READ_NODE_FIELD(graphele);
+	READ_NODE_FIELD(graphIndexAttnum);
+	READ_NODE_FIELD(graphIncludeAttnum);
+	READ_BOOL_FIELD(reverse);
 	READ_DONE();
 }
 
@@ -2032,6 +2039,7 @@ _readRangeTblEntry(const char ** str)
 	READ_OID_FIELD(checkAsUser);
 
 	READ_BOOL_FIELD(forceDistRandom);
+	READ_STRING_FIELD(graphName);
 
 	READ_DONE();
 }
@@ -2975,6 +2983,7 @@ _readPlannedStmt(const char ** str)
 
 	READ_STRING_FIELD(hiveUrl);
 	READ_ENUM_FIELD(originNodeType, NodeTag);
+	READ_NODE_FIELD(graphEntry);
 
 	READ_DONE();
 }
@@ -3208,6 +3217,8 @@ readIndexScanFields(const char ** str, IndexScan *local_node)
 		ALLOCATE_LOCAL(local_node->logicalIndexInfo, LogicalIndexInfo, 1 /* single node allocation  */);
 		readLogicalIndexInfo(str, local_node->logicalIndexInfo);
 	}
+	READ_BOOL_FIELD(indexonly);
+	READ_NODE_FIELD(idxColummns);
 }
 
 /*
@@ -3289,7 +3300,10 @@ _readMagmaIndexScan(const char ** str)
 	READ_LOCALS(MagmaIndexScan);
 	readScanInfo(str, (Scan *)local_node);
 	READ_STRING_FIELD(indexname);
+	READ_NODE_FIELD(indexqual);
 	READ_NODE_FIELD(indexqualorig);
+	READ_NODE_FIELD(indexstrategy);
+	READ_NODE_FIELD(indexsubtype);
 	READ_ENUM_FIELD(indexorderdir, ScanDirection);
 	READ_NODE_FIELD(uriList);
 	READ_NODE_FIELD(fmtOpts);
@@ -3316,7 +3330,10 @@ _readMagmaIndexOnlyScan(const char ** str)
 	READ_BOOL_FIELD(rejLimitInRows);
 	READ_OID_FIELD(fmterrtbl);
 	READ_STRING_FIELD(indexname);
+	READ_NODE_FIELD(indexqual);
 	READ_NODE_FIELD(indexqualorig);
+	READ_NODE_FIELD(indexstrategy);
+	READ_NODE_FIELD(indexsubtype);
 	READ_ENUM_FIELD(indexorderdir, ScanDirection);
 	READ_NODE_FIELD(errAosegnos);
 	READ_NODE_FIELD(err_aosegfileinfos);
@@ -4247,6 +4264,16 @@ _readResultRelSegFileInfo(const char **str)
 	READ_DONE();
 }
 
+static GraphEntry *
+_readGraphEntry(const char **str) {
+  READ_LOCALS(GraphEntry);
+
+  READ_OID_FIELD(relid);
+  READ_UINT_FIELD(requiredPerms);
+
+  READ_DONE();
+}
+
 static QueryResource *
 _readQueryResource(const char **str)
 {
@@ -5053,6 +5080,10 @@ readNodeBinary(const char ** str)
 			return_value = _readQueryResource(str);
 			break;
 
+		case T_GraphEntry:
+      return_value = _readGraphEntry(str);
+      break;
+
 		default:
 			return_value = NULL; /* keep the compiler silent */
 			elog(ERROR, "could not deserialize unrecognized node type: %d",
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 6570406..55d6a24 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -909,6 +909,7 @@ _readIndexStmt(void)
 	READ_STRING_FIELD(accessMethod);
 	READ_STRING_FIELD(tableSpace);
 	READ_NODE_FIELD(indexParams);
+	READ_NODE_FIELD(indexIncludingParams);
 	READ_NODE_FIELD(options);
 	READ_NODE_FIELD(whereClause);
 	READ_NODE_FIELD(rangetable);
@@ -920,7 +921,16 @@ _readIndexStmt(void)
 	READ_OID_FIELD(constrOid);
 	READ_BOOL_FIELD(concurrent);
 	READ_NODE_FIELD(idxOids);
-
+	READ_BOOL_FIELD(do_part);
+	READ_BOOL_FIELD(magma);
+	READ_OID_FIELD(relationOid);
+	READ_NODE_FIELD(allidxinfos);
+	READ_NODE_FIELD(columnsToRead);
+	READ_NODE_FIELD(contextdisp);
+	READ_NODE_FIELD(graphele);
+	READ_NODE_FIELD(graphIndexAttnum);
+	READ_NODE_FIELD(graphIncludeAttnum);
+	READ_BOOL_FIELD(reverse);
 	READ_DONE();
 }
 
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index ae88f81..827a907 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -41,6 +41,7 @@
 #include "catalog/pg_opclass.h"
 #include "catalog/pg_operator.h"
 #include "catalog/pg_type.h"
+#include "catalog/pg_exttable.h"
 #include "nodes/makefuncs.h"
 #include "optimizer/clauses.h"
 #include "optimizer/cost.h"
@@ -1662,6 +1663,8 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
 	List	   *indexpaths;
 	List	   *bitindexpaths;
 	ListCell   *l;
+	ListCell   *nextcell = NULL;
+	ListCell   *prevcell = NULL;
 	InnerIndexscanInfo *info;
 	MemoryContext oldcontext;
 	RangeTblEntry *rte;
@@ -1789,17 +1792,100 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
 	relstorage = get_rel_relstorage(rte->relid);
 	Assert(relstorage != '\0');
 
-    /* Exclude plain index paths if user doesn't want them. */
-    if (!root->config->enable_indexscan && !root->config->mpp_trying_fallback_plan)
-        indexpaths = NIL;
-
-	/* Exclude plain index paths if the relation is an append-only relation. */
-	if (relstorage_is_ao(relstorage) ||
-		/* disable inner join index scan for magma
-		 * because magma index scan cant support dynamic filter
-		 */
-		relstorage == RELSTORAGE_EXTERNAL)
-		indexpaths = NIL;
+	if (RelationIsMagmaTable2(rte->relid))
+	{
+	        bool        del = false;
+	        Expr       *clause;
+	        Expr	   *leftop;	/* expr on lhs of operator */
+	        Expr	   *rightop;	/* expr on rhs ... */
+	        ListCell   *qual;
+	        Path       *path;
+
+	        /* Exclude plain index paths if user doesn't want them. */
+	        if ((!root->config->enable_magma_indexscan || !root->config->enable_magma_indexonlyscan)
+	                && !root->config->mpp_trying_fallback_plan)
+	        {
+	                for (l = list_head(indexpaths); l; l = nextcell)
+	                {
+	                        nextcell = lnext(l);
+	                        path = (Path *) lfirst(l);
+	                        if ((path->pathtype == T_MagmaIndexScan && !root->config->enable_magma_indexscan)
+	                                || (path->pathtype == T_MagmaIndexOnlyScan && !root->config->enable_magma_indexonlyscan))
+	                                indexpaths = list_delete_cell(indexpaths, l, prevcell);
+	                        else
+	                                prevcell = l;
+	                }
+	        }
+
+	        /*
+	         * Exclude plain index paths if they contain an expression that magma does not support.
+	         * So far, only (Var op Const), (Const op Var), and (Var eq Var) are supported.
+	         */
+	        prevcell = NULL;
+	        for (l = list_head(indexpaths); l; l = nextcell)
+	        {
+	                nextcell = lnext(l);
+	                path = (Path *) lfirst(l);
+
+	                foreach(qual, ((IndexPath *) path)->indexquals)
+	                {
+	                        clause = ((RestrictInfo *) lfirst(qual))->clause;
+	                        /* e.g., RowCompareExpr */
+	                        if (!IsA(clause, OpExpr))
+	                        {
+	                                del = true;
+	                                break;
+	                        }
+
+	                        Assert(list_length(((OpExpr *) clause)->args) == 2);
+
+	                        leftop = (Expr *) get_leftop(clause);
+	                        if (leftop && IsA(leftop, RelabelType))
+	                                leftop = ((RelabelType *) leftop)->arg;
+	                        rightop = (Expr *) get_rightop(clause);
+	                        if (rightop && IsA(rightop, RelabelType))
+	                                rightop = ((RelabelType *) rightop)->arg;
+
+	                        /* e.g., (Var op FuncExpr), (FuncExpr op Var) */
+	                        if (!(IsA(leftop, Var) || IsA(leftop, Const))
+	                                || !(IsA(rightop, Var) || IsA(rightop, Const)))
+	                        {
+	                                del = true;
+	                                break;
+	                        }
+	                        if (IsA(leftop, Var) && IsA(rightop, Var))
+	                        {
+	                                if (((Var *) leftop)->vartype != ((Var *) rightop)->vartype
+	                                        /* If not satisfy (Var eq Var). */
+	                                        || pg_strcasecmp(get_opname(((OpExpr *) clause)->opno), "=") != 0)
+	                                {
+	                                        del = true;
+	                                        break;
+	                                }
+	                        }
+	                }
+
+	                if (del)
+	                {
+	                        indexpaths = list_delete_cell(indexpaths, l, prevcell);
+	                        del = false;
+	                }
+	                else
+	                        prevcell = l;
+	        }
+	}
+	else
+	{
+	        if (
+	                /* Exclude plain index paths if user doesn't want them. */
+	                (!root->config->enable_indexscan && !root->config->mpp_trying_fallback_plan)
+	                /* ... if the relation is an append-only relation. */
+	                || relstorage_is_ao(relstorage)
+	                /* ... if the relation is an external relation and not magma. */
+	                || relstorage_is_external(relstorage)
+	        )
+	                indexpaths = NIL;
+	}
 
 	/*
 	 * If we found anything usable, generate a BitmapHeapPath for the most
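
The filtering loop above keeps an index path on a magma table only when every index qual has a shape magma can push down: (Var op Const), (Const op Var), or (Var eq Var) on identical types. A small standalone model of that acceptance rule is shown below; QualShape and qual_supported_by_magma are illustrative names, not backend code.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative model of an index-qual operand; not backend code. */
    typedef enum { OPERAND_VAR, OPERAND_CONST, OPERAND_OTHER } OperandKind;

    typedef struct {
        OperandKind left;
        OperandKind right;
        const char *opname;       /* e.g. "=", "<" */
        bool        types_match;  /* vartype equality, relevant for Var-op-Var */
    } QualShape;

    /*
     * Mirror of the acceptance rule used for magma tables above: keep
     * (Var op Const) and (Const op Var); keep (Var op Var) only when it is
     * an equality on identical types; reject anything else (e.g. FuncExpr).
     */
    static bool qual_supported_by_magma(const QualShape *q)
    {
        if (q->left == OPERAND_OTHER || q->right == OPERAND_OTHER)
            return false;
        if (q->left == OPERAND_VAR && q->right == OPERAND_VAR)
            return q->types_match && strcmp(q->opname, "=") == 0;
        return true;
    }

    int main(void)
    {
        QualShape var_eq_const = { OPERAND_VAR, OPERAND_CONST, "=", true };
        QualShape var_lt_var   = { OPERAND_VAR, OPERAND_VAR,   "<", true };
        QualShape var_eq_var   = { OPERAND_VAR, OPERAND_VAR,   "=", true };

        printf("var = const : %d\n", qual_supported_by_magma(&var_eq_const)); /* 1 */
        printf("var < var   : %d\n", qual_supported_by_magma(&var_lt_var));   /* 0 */
        printf("var = var   : %d\n", qual_supported_by_magma(&var_eq_var));   /* 1 */
        return 0;
    }
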
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 7e09615..b68c0ab 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -40,6 +40,7 @@
 #include "miscadmin.h" /* work_mem */
 
 #include "access/hd_work_mgr.h"
+#include "catalog/catquery.h"
 #include "catalog/pg_type.h"    /* INT8OID */
 #include "nodes/makefuncs.h"
 #include "executor/execHHashagg.h"
@@ -168,7 +169,10 @@ static MagmaIndexScan *make_magma_indexscan(List *qptlist,
 			   Index scanrelid,
 			   bool indexonly,
 			   Oid indexid,
+			   List *indexqual,
 			   List *indexqualorig,
+			   List *indexstrategy,
+			   List *indexsubtype,
 			   List *urilist,
 			   List *fmtopts,
 			   char fmttype,
@@ -352,8 +356,7 @@ create_scan_plan(CreatePlanContext *ctx, Path *best_path)
 	 */
 	if (use_physical_tlist(ctx, rel))
 	{
-		if (best_path->pathtype == T_MagmaIndexOnlyScan || best_path->pathtype == T_OrcIndexOnlyScan ||
-				best_path->pathtype == T_OrcIndexScan)
+		if (best_path->pathtype == T_MagmaIndexOnlyScan || best_path->pathtype == T_OrcIndexOnlyScan)
 		{
 			/* For index-only scan, the preferred tlist is the index's */
 			tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist);
@@ -2279,7 +2282,10 @@ MagmaIndexScan *create_magma_indexscan_plan(
                     baserelid,
                     indexonly,
                     indexoid,
+                    fixed_indexquals,
                     stripped_indexquals,
+                    indexstrategy,
+                    indexsubtype,
                     rel->locationlist,
                     fmtopts,
                     rel->fmttype,
@@ -3834,6 +3840,19 @@ make_indexscan(List *qptlist,
 	IndexScan  *node = makeNode(IndexScan);
 	node->scan.plan.type = pathtype;
 	Plan	   *plan = &node->scan.plan;
+	/* native orc index scan need index columns */
+	if (pathtype == T_OrcIndexScan || pathtype == T_OrcIndexOnlyScan)
+	{
+		cqContext *idxcqCtx = caql_beginscan(NULL, cql("SELECT * FROM pg_index "
+				" WHERE indexrelid = :1 ", ObjectIdGetDatum(indexid)));
+		HeapTuple	ht_idx = caql_getnext(idxcqCtx);
+		Form_pg_index idxrec = (Form_pg_index) GETSTRUCT(ht_idx);
+		for (int i = 0; i < idxrec->indnatts; i++)
+		{
+			node->idxColummns = lappend_oid(node->idxColummns, idxrec->indkey.values[i]);
+		}
+		caql_endscan(idxcqCtx);
+	}
 
 	/* cost should be inserted by caller */
 	plan->targetlist = qptlist;
@@ -3855,7 +3874,12 @@ make_indexscan(List *qptlist,
 
 static MagmaIndexScan *make_magma_indexscan(
 		List *qptlist, List *qpqual, Index scanrelid, bool indexonly,
-		Oid indexid, List *indexqualorig, List *urilist,
+		Oid indexid,
+		List *indexqual,
+		List *indexqualorig,
+		List *indexstrategy,
+		List *indexsubtype,
+		List *urilist,
 		List *fmtopts, char fmttype,
 		int rejectlimit, bool rejectlimitinrows,
 		Oid fmterrtableOid, int encoding, ScanDirection indexscandir)
@@ -3872,7 +3896,10 @@ static MagmaIndexScan *make_magma_indexscan(
 	plan->targetlist = qptlist;
 	plan->qual = qpqual;
 	node->indexname = getIndexNameByOid(indexid);
+	node->indexqual = indexqual;
 	node->indexqualorig = indexqualorig;
+	node->indexstrategy = indexstrategy;
+	node->indexsubtype = indexsubtype;
 	node->indexorderdir = indexscandir;
 	plan->lefttree = NULL;
 	plan->righttree = NULL;
diff --git a/src/backend/optimizer/plan/newPlanner.c b/src/backend/optimizer/plan/newPlanner.c
index 26be1c8..e15b7af 100644
--- a/src/backend/optimizer/plan/newPlanner.c
+++ b/src/backend/optimizer/plan/newPlanner.c
@@ -62,10 +62,6 @@ const char *new_executor_runtime_filter_mode;
 const char *new_executor_runtime_filter_mode_local = "local";
 const char *new_executor_runtime_filter_mode_global = "global";
 
-const char *new_scheduler_mode_on = "on";
-const char *new_scheduler_mode_off = "off";
-char *new_scheduler_mode;
-
 int new_interconnect_type;
 const char *show_new_interconnect_type() {
   switch (new_interconnect_type) {
@@ -90,6 +86,9 @@ static bool do_convert_targetlist_to_common_plan(Plan *node,
 static bool do_convert_quallist_to_common_plan(Plan *node,
                                                CommonPlanContext *ctx,
                                                bool isInsist);
+static bool do_convert_indexqualorig_to_common_plan(Plan *node,
+                                                    CommonPlanContext *ctx,
+                                                    bool isInsist);
 static bool do_convert_indexqual_to_common_plan(Plan *node,
                                                 CommonPlanContext *ctx,
                                                 bool isInsist);
@@ -369,6 +368,16 @@ void convert_extscan_to_common_plan(Plan *node, List *splits, Relation rel,
   }
 }
 
+void *convert_orcscan_indexqualorig_to_common_plan(Plan *node,
+                                                   CommonPlanContext *ctx,
+                                                   List *idxColumns) {
+  planner_init_common_plan_context(NULL, ctx);
+  ctx->idxColumns = idxColumns;
+  univPlanSeqScanNewInstance(ctx->univplan, -1);
+  do_convert_indexqualorig_to_common_plan(node, ctx, false);
+  return univPlanGetQualList(ctx->univplan);
+}
+
 void *convert_orcscan_qual_to_common_plan(Plan *node, CommonPlanContext *ctx) {
   planner_init_common_plan_context(NULL, ctx);
   univPlanSeqScanNewInstance(ctx->univplan, -1);
@@ -479,9 +488,6 @@ void planner_init_common_plan_context(PlannedStmt *stmt,
   ctx->convertible =
       pg_strcasecmp(new_executor_mode, new_executor_mode_off) != 0 ? true
                                                                    : false;
-  ctx->enforceNewScheduler =
-      pg_strcasecmp(new_scheduler_mode, new_scheduler_mode_on) == 0 ? true
-                                                                    : false;
   ctx->base.node = (Node *)stmt;
   ctx->querySelect = false;
   ctx->isMagma = false;
@@ -491,6 +497,8 @@ void planner_init_common_plan_context(PlannedStmt *stmt,
   ctx->parent = NULL;
   ctx->exprBufStack = NIL;
   ctx->rangeNum = 0;
+  ctx->isConvertingIndexQual = false;
+  ctx->idxColumns = NIL;
 }
 
 void planner_destroy_common_plan_context(CommonPlanContext *ctx, bool enforce) {
@@ -994,21 +1002,45 @@ bool do_convert_quallist_to_common_plan(Plan *node, CommonPlanContext *ctx,
   return true;
 }
 
+static bool do_convert_indexqualorig_to_common_plan(Plan *node,
+                                                    CommonPlanContext *ctx,
+                                                    bool isInsist) {
+  ListCell *lc;
+  foreach (lc, ((IndexScan *)node)->indexqualorig) {
+    Expr *expr = (Expr *)lfirst(lc);
+    univPlanNewExpr(ctx->univplan);
+    bool convert_ret = do_convert_expr_to_common_plan(-1, expr, ctx);
+    if (!convert_ret && isInsist)
+      return false;
+    else if (!convert_ret && !isInsist)
+      continue;
+    else if (convert_ret)
+      univPlanQualListAddExpr(ctx->univplan);
+  }
+  return true;
+}
+
 static bool do_convert_indexqual_to_common_plan(Plan *node,
                                                 CommonPlanContext *ctx,
                                                 bool isInsist) {
+  /* Must be reset to false before this function returns. */
+  ctx->isConvertingIndexQual = true;
+
   ListCell *lc;
   foreach (lc, ((ExternalScan *)node)->indexqualorig) {
     Expr *expr = (Expr *)lfirst(lc);
     univPlanNewExpr(ctx->univplan);
     bool convert_ret = do_convert_expr_to_common_plan(-1, expr, ctx);
-    if (!convert_ret && isInsist)
+    if (!convert_ret && isInsist) {
+      ctx->isConvertingIndexQual = false;
       return false;
-    else if (!convert_ret && !isInsist)
+    } else if (!convert_ret && !isInsist)
       continue;
     else if (convert_ret)
       univPlanIndexQualListAddExpr(ctx->univplan);
   }
+
+  ctx->isConvertingIndexQual = false;
   return true;
 }
 
@@ -1623,6 +1655,10 @@ bool do_convert_expr_to_common_plan(int32_t pid, Expr *expr,
 
     case T_Var: {
       Var *var = (Var *)expr;
+      // Native ORC index scan: remap the table attno to its index key position.
+      if (ctx->idxColumns != NIL) {
+        var->varattno = list_find_oid(ctx->idxColumns, var->varattno) + 1;
+      }
       // TODO(chiyang): support system attribute
       if (var->varattno < 0 &&
           !(var->varattno == SelfItemPointerAttributeNumber ||
@@ -1686,6 +1722,22 @@ bool do_convert_expr_to_common_plan(int32_t pid, Expr *expr,
     case T_OpExpr: {
       OpExpr *opExpr = (OpExpr *)expr;
 
+      // Disable parameterized index quals; they are not supported yet.
+      // We do not disable them on segments because the old executor also runs
+      // through this code (called by convert_extscan_to_common_plan() in
+      // magma_beginscan()/magma_rescan()), and it does support parameterized
+      // index quals.
+      if (AmIMaster() && ctx->isMagma && ctx->isConvertingIndexQual) {
+        Expr *leftop = (Expr *)get_leftop(opExpr);
+        if (leftop && IsA(leftop, RelabelType))
+          leftop = ((RelabelType *)leftop)->arg;
+        Expr *rightop = (Expr *)get_rightop(opExpr);
+        if (rightop && IsA(rightop, RelabelType))
+          rightop = ((RelabelType *)rightop)->arg;
+
+        if (IsA(leftop, Var) && IsA(rightop, Var)) goto end;
+      }
+
       old = parentExprSwitchTo(expr, ctx);
 
       mappingFuncId = HAWQ_FUNCOID_MAPPING(opExpr->opfuncid);
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 9717a43..f82b5c0 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -874,7 +874,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
 	result->queryPartsMetadata = NIL;
 	result->numSelectorsPerScanId = NIL;
 	result->hiveUrl = NULL;
-	
+	result->graphEntry = parse->graphEntry;
 	Assert(result->utilityStmt == NULL || IsA(result->utilityStmt, DeclareCursorStmt));
 	
 	if (Gp_role == GP_ROLE_DISPATCH)
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index a3ca1b1..2bcaf77 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -528,6 +528,8 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, const int rtoffset)
 			fix_scan_list(glob, splan->scan.plan.targetlist, rtoffset);
 			splan->scan.plan.qual =
 			fix_scan_list(glob, splan->scan.plan.qual, rtoffset);
+			splan->indexqual =
+			fix_scan_list(glob, splan->indexqual, rtoffset);
 			splan->indexqualorig =
 			fix_scan_list(glob, splan->indexqualorig, rtoffset);
 		}
@@ -1447,6 +1449,30 @@ set_inner_join_references(PlannerGlobal *glob, Plan *inner_plan,
 												 rtoffset);
 		}
 	}
+	else if (IsA(inner_plan, MagmaIndexOnlyScan) || IsA(inner_plan, MagmaIndexScan))
+	{
+		ExternalScan  *innerscan = (ExternalScan *) inner_plan;
+		List	   *indexqualorig = innerscan->indexqualorig;
+
+		/* No work needed if indexqual refers only to its own rel... */
+		if (NumRelids((Node *) indexqualorig) > 1)
+		{
+			Index		innerrel = innerscan->scan.scanrelid;
+
+			/* only refs to outer vars get changed in the inner qual */
+			innerscan->indexqualorig = fix_join_expr(
+			        glob, indexqualorig, outer_itlist, NULL,
+			        innerrel, rtoffset);
+			innerscan->indexqual = fix_join_expr(
+			        glob, innerscan->indexqual, outer_itlist, NULL,
+			        innerrel, rtoffset);
+
+			if (NumRelids((Node *) inner_plan->qual) > 1)
+				inner_plan->qual = fix_join_expr(
+						glob, inner_plan->qual, outer_itlist, NULL,
+						innerrel, rtoffset);
+		}
+	}
 	else if (IsA(inner_plan, BitmapIndexScan))
 	{
 		/*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 15226b0..9a58069 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -67,6 +67,13 @@
 #include "cdb/cdbpartition.h"
 #include "cdb/cdbparquetstoragewrite.h"
 #include "cdb/cdbdatalocality.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph_elabel.h"
 #include "commands/dbcommands.h"
 #include "commands/defrem.h"
 #include "commands/prepare.h"
@@ -229,6 +236,18 @@ static Query *transformCreateExternalStmt(ParseState *pstate,
                                           CreateExternalStmt *stmt,
                                           List **extras_before,
                                           List **extras_after);
+static Query *transformCreateVlabelStmt(ParseState *pstate,
+                                          CreateVlabelStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after);
+static Query *transformCreateElabelStmt(ParseState *pstate,
+                                          CreateElabelStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after);
+static Query *transformCreateGraphStmt(ParseState *pstate,
+                                          CreateGraphStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after);
 static Query *transformCreateForeignStmt(ParseState *pstate,
                                          CreateForeignStmt *stmt,
                                          List **extras_before,
@@ -323,6 +342,12 @@ static Node *make_prule_rulestmt(ParseState *pstate, CreateStmtContext *cxt,
 static List *transformAttributeEncoding(List *stenc, CreateStmt *stmt,
                                         CreateStmtContext cxt);
 
+extern char *graphVertexTableName(char *gname,char *vname);
+
+extern char *graphEdgeTableName(char *gname,char *ename);
+
+extern int parseAndTransformAsGraph(ParseState *pstate, RangeVar *rangeVar);
+
 char *getDefaultFilespace();
 
 /*
@@ -648,6 +673,21 @@ static Query *transformStmt(ParseState *pstate, Node *parseTree,
           pstate, (CreateExternalStmt *)parseTree, extras_before, extras_after);
       break;
 
+    case T_CreateVlabelStmt:
+      result = transformCreateVlabelStmt(
+          pstate, (CreateVlabelStmt *)parseTree, extras_before, extras_after);
+      break;
+
+    case T_CreateElabelStmt:
+      result = transformCreateElabelStmt(
+          pstate, (CreateElabelStmt *)parseTree, extras_before, extras_after);
+      break;
+
+    case T_CreateGraphStmt:
+      result = transformCreateGraphStmt(
+                pstate, (CreateGraphStmt *)parseTree, extras_before, extras_after);
+      break;
+
     case T_CreateForeignStmt:
       result = transformCreateForeignStmt(
           pstate, (CreateForeignStmt *)parseTree, extras_before, extras_after);
@@ -741,7 +781,8 @@ static Query *transformStmt(ParseState *pstate, Node *parseTree,
         result = transformSelectStmt(pstate, n);
       else
         result = transformSetOperationStmt(pstate, n);
-    } break;
+    }
+    break;
 
     case T_DeclareCursorStmt:
       result =
@@ -763,6 +804,7 @@ static Query *transformStmt(ParseState *pstate, Node *parseTree,
   /* Mark as original query until we learn differently */
   result->querySource = QSRC_ORIGINAL;
   result->canSetTag = true;
+  result->graphEntry = pstate->graphEntry;
 
   /*
    * Check that we did not produce too many resnos; at the very least we
@@ -885,6 +927,9 @@ static Query *transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt) {
 
   /* setup database name for use of magma operations */
   MemoryContext oldContext = MemoryContextSwitchTo(MessageContext);
+
+  int isGraph = parseAndTransformAsGraph(pstate, stmt->relation);
+
   char *dbname = stmt->relation->catalogname;
   database =
       (dbname != NULL) ? pstrdup(dbname) : get_database_name(MyDatabaseId);
@@ -894,7 +939,9 @@ static Query *transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt) {
   qry->resultRelation = setTargetTable(
       pstate, stmt->relation, interpretInhOption(stmt->relation->inhOpt), true,
       ACL_DELETE);
-
+  if (isGraph)
+    pstate->p_target_rangetblentry->graphName = stmt->relation->schemaname ?
+        pstrdup(stmt->relation->schemaname) : NULL;
   qry->distinctClause = NIL;
 
   /*
@@ -937,7 +984,6 @@ static Query *transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt) {
 
   return qry;
 }
-
 /*
  * transformInsertStmt -
  *	  transform an Insert Statement
@@ -958,6 +1004,7 @@ static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
   ListCell *icols;
   ListCell *attnos;
   ListCell *lc;
+  char *graphName;
 
   qry->commandType = CMD_INSERT;
   pstate->p_is_insert = true;
@@ -991,13 +1038,12 @@ static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
     sub_varnamespace = NIL;
   }
 
-  if (stmt->relation->catalogname != NULL) {
-    if (database != NULL) {
-      pfree(database);
-      database = NULL;
-    }
-    database = pstrdup(stmt->relation->catalogname);
-  }
+  graphName = stmt->relation->schemaname ? pstrdup(stmt->relation->schemaname) : NULL;
+  int isGraph = parseAndTransformAsGraph(pstate, stmt->relation);
+  MemoryContext oldContext = MemoryContextSwitchTo(MessageContext);
+  database =
+      (stmt->relation->catalogname != NULL) ? pstrdup(stmt->relation->catalogname) : get_database_name(MyDatabaseId);
+  MemoryContextSwitchTo(oldContext);
   /*
    * Must get write lock on INSERT target table before scanning SELECT, else
    * we will grab the wrong kind of initial lock if the target table is also
@@ -1006,7 +1052,8 @@ static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
    */
   qry->resultRelation =
       setTargetTable(pstate, stmt->relation, false, false, ACL_INSERT);
-
+  if(isGraph)
+    pstate->p_target_rangetblentry->graphName = graphName;
   /* Validate stmt->cols list, or build default list if no list given */
   icolumns = checkInsertTargets(pstate, stmt->cols, &attrnos);
   Assert(list_length(icolumns) == list_length(attrnos));
@@ -1794,6 +1841,13 @@ static List *transformAttributeEncoding(List *stenc, CreateStmt *stmt,
   return newenc;
 }
 
+Query *transformCreateStmtImpl(ParseState *pstate,
+                               CreateStmt *stmt,
+                               List **extras_before,
+                               List **extras_after) {
+  return transformCreateStmt(pstate, stmt, extras_before, extras_after);
+}
+
 /*
  * transformCreateStmt -
  *	  transforms the "create table" statement
@@ -2376,6 +2430,196 @@ void recognizeExternalRelationFormatterOptions(
   createExtStmt->base.options = newOpts;
 }
 
+static Query *transformCreateVlabelStmt(ParseState *pstate,
+                                          CreateVlabelStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after) {
+  CreateStmtContext cxt;
+  Query *q;
+
+  cxt.stmtType = "CREATE VERTEX";
+  cxt.relation = stmt->relation;
+  cxt.hasoids = false;
+  cxt.isalter = false;
+  cxt.columns = NIL;
+  cxt.ckconstraints = NIL;
+  cxt.fkconstraints = NIL;
+  cxt.ixconstraints = NIL;
+  cxt.inh_indexes = NIL;
+  cxt.pkey = NULL;
+
+  cxt.blist = NIL;
+  cxt.alist = NIL;
+
+  ListCell *elements;
+  foreach (elements, stmt->tableElts) {
+    Node *element = lfirst(elements);
+
+    switch (nodeTag(element)) {
+      case T_ColumnDef:
+        transformColumnDefinition(pstate, &cxt, (ColumnDef *)element);
+        ColumnDef *column = (ColumnDef *)element;
+        Type type = typenameType(NULL, column->typname);
+        column->typname->typid = typeTypeId(type);
+        ReleaseType(type);
+        break;
+
+      case T_Constraint:
+        transformExtTableConstraint(pstate, &cxt, (Constraint *)element);
+        break;
+
+      case T_FkConstraint:
+        /* should never happen. If it does fix gram.y */
+        elog(ERROR, "node type %d not supported for vlabel",
+             (int)nodeTag(element));
+        break;
+
+      default:
+        elog(ERROR, "unrecognized node type: %d", (int)nodeTag(element));
+        break;
+    }
+  }
+
+  /*
+   * transformIndexConstraints wants cxt.alist to contain only index
+   * statements, so transfer anything we already have into extras_after
+   * immediately.
+   */
+  *extras_after = list_concat(cxt.alist, *extras_after);
+  cxt.alist = NIL;
+
+  /*
+   * Output results.
+   */
+  q = makeNode(Query);
+  q->commandType = CMD_UTILITY;
+  q->utilityStmt = (Node *)stmt;
+  stmt->tableElts = cxt.columns;
+  stmt->constraints = cxt.ixconstraints;
+  *extras_before = list_concat(*extras_before, cxt.blist);
+  *extras_after = list_concat(cxt.alist, *extras_after);
+  return q;
+}
+
+static Query *transformCreateElabelStmt(ParseState *pstate,
+                                          CreateElabelStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after) {
+  CreateStmtContext cxt;
+  Query *q;
+
+  cxt.stmtType = "CREATE EDGE";
+  cxt.relation = stmt->relation;
+  cxt.hasoids = false;
+  cxt.isalter = false;
+  cxt.columns = NIL;
+  cxt.ckconstraints = NIL;
+  cxt.fkconstraints = NIL;
+  cxt.ixconstraints = NIL;
+  cxt.inh_indexes = NIL;
+  cxt.pkey = NULL;
+
+  cxt.blist = NIL;
+  cxt.alist = NIL;
+
+  ListCell *elements;
+  foreach (elements, stmt->tableElts) {
+    Node *element = lfirst(elements);
+
+    switch (nodeTag(element)) {
+      case T_ColumnDef:
+        transformColumnDefinition(pstate, &cxt, (ColumnDef *)element);
+        ColumnDef *column = (ColumnDef *)element;
+        Type type = typenameType(NULL, column->typname);
+        column->typname->typid = typeTypeId(type);
+        ReleaseType(type);
+        break;
+
+      case T_Constraint:
+        transformExtTableConstraint(pstate, &cxt, (Constraint *)element);
+        break;
+
+      case T_FkConstraint:
+        /* should never happen. If it does fix gram.y */
+        elog(ERROR, "node type %d not supported for elabel",
+             (int)nodeTag(element));
+        break;
+
+      default:
+        elog(ERROR, "unrecognized node type: %d", (int)nodeTag(element));
+        break;
+    }
+  }
+
+  /*
+   * transformIndexConstraints wants cxt.alist to contain only index
+   * immediately.
+   */
+  *extras_after = list_concat(cxt.alist, *extras_after);
+  cxt.alist = NIL;
+
+  /*
+   * Output results.
+   */
+  q = makeNode(Query);
+  q->commandType = CMD_UTILITY;
+  q->utilityStmt = (Node *)stmt;
+  stmt->tableElts = cxt.columns;
+  stmt->constraints = cxt.ixconstraints;
+  *extras_before = list_concat(*extras_before, cxt.blist);
+  *extras_after = list_concat(cxt.alist, *extras_after);
+  return q;
+}
+
+static Query *transformCreateGraphStmt(ParseState *pstate,
+                                          CreateGraphStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after) {
+  CreateStmtContext cxt;
+  Query *q;
+
+  cxt.stmtType = "CREATE GRAPH";
+  cxt.relation = stmt->graph;
+  cxt.hasoids = false;
+  cxt.isalter = false;
+  cxt.columns = NIL;
+  cxt.ckconstraints = NIL;
+  cxt.fkconstraints = NIL;
+  cxt.ixconstraints = NIL;
+  cxt.inh_indexes = NIL;
+  cxt.pkey = NULL;
+
+  cxt.blist = NIL;
+  cxt.alist = NIL;
+
+
+  /*
+   * transformIndexConstraints wants cxt.alist to contain only index
+   * statements, so transfer anything we already have into extras_after
+   * immediately.
+   */
+  *extras_after = list_concat(cxt.alist, *extras_after);
+  cxt.alist = NIL;
+
+  /*
+   * Output results.
+   */
+  q = makeNode(Query);
+  q->commandType = CMD_UTILITY;
+  q->utilityStmt = (Node *)stmt;
+  *extras_before = list_concat(*extras_before, cxt.blist);
+  *extras_after = list_concat(cxt.alist, *extras_after);
+  return q;
+}
+
+Query *transformCreateExternalStmtImpl(ParseState *pstate,
+                                          CreateExternalStmt *stmt,
+                                          List **extras_before,
+                                          List **extras_after) {
+  return transformCreateExternalStmt(pstate, stmt, extras_before, extras_after);
+}
+
 static Query *transformCreateExternalStmt(ParseState *pstate,
                                           CreateExternalStmt *stmt,
                                           List **extras_before,
@@ -2591,6 +2835,9 @@ static Query *transformCreateExternalStmt(ParseState *pstate,
         }
       } break;
 
+      case T_String:
+        break;
+
       default:
         elog(ERROR, "unrecognized node type: %d", (int)nodeTag(element));
         break;
@@ -8084,6 +8331,181 @@ static Query *transformIndexStmt(ParseState *pstate, IndexStmt *stmt,
   qry = makeNode(Query);
   qry->commandType = CMD_UTILITY;
 
+  char *graph = stmt->relation->schemaname ? pstrdup(stmt->relation->schemaname) : NULL;
+  char *ele = stmt->relation->relname;
+  int isGraph = parseAndTransformAsGraph(pstate, stmt->relation);
+  if(isGraph) {
+    stmt->graphele = makeRangeVar(stmt->relation->schemaname, graph, ele, -1);
+    Oid dboid = GetCatalogId(stmt->relation->catalogname);
+    Oid namespaceId = LookupNamespaceId(stmt->relation->schemaname, dboid);
+    if(isGraph == 2 /* as edge*/) {
+      Oid relid = caql_getoid(
+          NULL,
+          cql("SELECT oid FROM pg_class "
+            " WHERE relname = :1 "
+            " AND relnamespace = :2 ",
+            CStringGetDatum(stmt->relation->relname),
+            ObjectIdGetDatum(namespaceId)));
+
+      Relation attrelation = heap_open(AttributeRelationId, RowExclusiveLock);
+      cqContext cqc;
+      int colNum = caql_getcount(
+          NULL,
+                cql("SELECT COUNT(*) FROM pg_attribute "
+                  " WHERE attrelid = :1 AND attnum > :2",
+                  ObjectIdGetDatum(relid), Int32GetDatum(0)));
+
+      cqContext *pcqCtx = caql_beginscan(
+          caql_addrel(cqclr(&cqc), attrelation),
+          cql("SELECT * FROM pg_attribute "
+            " WHERE attrelid = :1 AND attnum > :2",
+            ObjectIdGetDatum(relid), Int32GetDatum(0)));
+      HeapTuple attributeTuple;
+      char **colnames = palloc0(colNum * sizeof(char*));
+      while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))) {
+        Form_pg_attribute att = (Form_pg_attribute) GETSTRUCT(attributeTuple);
+        colnames[att->attnum - 1] = pstrdup(NameStr(att->attname));
+      }
+      caql_endscan(pcqCtx);
+      heap_close(attrelation, RowExclusiveLock);
+      Relation ElabelRelation = heap_open(ElabelRelationId, RowExclusiveLock);
+      HeapTuple elabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), ElabelRelation),
+                                           cql("SELECT * FROM skylon_elabel"
+          " WHERE elabelname = :1 AND schemaname = :2",
+          CStringGetDatum(ele), CStringGetDatum(stmt->relation->schemaname)));
+      Form_skylon_elabel elabel = (Form_skylon_elabel) GETSTRUCT(elabelTuple);
+      Relation VlabelAttRelation = heap_open(VlabelAttrRelationId, RowExclusiveLock);
+      int srcNum =
+          caql_getcount(
+          NULL,
+          cql("SELECT COUNT(*) FROM skylon_vlabel_attribute "
+             " WHERE vlabelname = :1 AND schemaname = :2 "
+             "AND primaryrank > :3",
+             CStringGetDatum(elabel->fromvlabel.data), CStringGetDatum(stmt->relation->schemaname), Int32GetDatum(0)));
+      int dstNum = caql_getcount(
+        NULL,
+        cql("SELECT COUNT(*) FROM skylon_vlabel_attribute "
+          " WHERE vlabelname = :1 AND schemaname = :2 "
+            "AND primaryrank > :3",
+          CStringGetDatum(elabel->tovlabel.data), CStringGetDatum(stmt->relation->schemaname), Int32GetDatum(0)));
+      int primaryNum = srcNum + dstNum;
+      heap_close(VlabelAttRelation, RowExclusiveLock);
+      heap_close(ElabelRelation, RowExclusiveLock);
+      Relation elabelAttRelation = heap_open(ElabelAttrRelationId, RowExclusiveLock);
+      int eattnum = caql_getcount(
+          NULL,
+              cql("SELECT COUNT(*) FROM skylon_elabel_attribute "
+                " WHERE elabelname = :1 AND schemaname = :2 "
+                  "AND rank > :3",
+                  CStringGetDatum(ele), CStringGetDatum(stmt->relation->schemaname), Int32GetDatum(0)));
+      char **attnames = palloc0(eattnum * sizeof(char*));
+      pcqCtx = caql_beginscan(
+          caql_addrel(cqclr(&cqc), elabelAttRelation),
+                        cql("SELECT * FROM skylon_elabel_attribute "
+                          " WHERE elabelname = :1 AND schemaname = :2 "
+                            "AND rank > :3",
+                            CStringGetDatum(ele), CStringGetDatum(stmt->relation->schemaname), Int32GetDatum(0)));
+      while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))) {
+        Form_skylon_elabel_attribute att = (Form_skylon_elabel_attribute) GETSTRUCT(attributeTuple);
+        attnames[att->rank - 1] = pstrdup(NameStr(att->attrname));
+      }
+      caql_endscan(pcqCtx);
+      heap_close(elabelAttRelation, RowExclusiveLock);
+      ListCell *cell;
+      foreach(cell, stmt->indexParams) {
+        IndexElem *ele = (IndexElem *)lfirst(cell);
+        for(int i = 0; i < eattnum; i++) {
+          if(strcmp(attnames[i], ele->name) == 0) {
+            Value *attnum = makeInteger(0);
+            attnum->val.ival = i + 1;
+            stmt->graphIndexAttnum = lappend(stmt->graphIndexAttnum, attnum);
+            break;
+          }
+        }
+      }
+      foreach(cell, stmt->indexIncludingParams) {
+        IndexElem *ele = (IndexElem *)lfirst(cell);
+        for(int i = 0; i < eattnum; i++) {
+          if(strcmp(attnames[i], ele->name) == 0) {
+            Value *attnum = makeInteger(0);
+            attnum->val.ival = i + 1;
+            stmt->graphIncludeAttnum = lappend(stmt->graphIncludeAttnum, attnum);
+            break;
+          }
+        }
+      }
+      List *newIndexParams = NIL;
+      if(!stmt->reverse) {
+        for(int i = 0; i < primaryNum; i++) {
+          IndexElem *indexele = makeNode(IndexElem);
+          indexele->name = colnames[i];
+          newIndexParams = lappend(newIndexParams, indexele);
+        }
+      }
+      else {
+        for(int i = 0; i < dstNum; i++) {
+          IndexElem *indexele = makeNode(IndexElem);
+          indexele->name = colnames[srcNum + i];
+          newIndexParams = lappend(newIndexParams, indexele);
+        }
+        for(int i = 0; i < srcNum; i++) {
+          IndexElem *indexele = makeNode(IndexElem);
+          indexele->name = colnames[i];
+          newIndexParams = lappend(newIndexParams, indexele);
+        }
+      }
+    stmt->indexParams = list_concat(newIndexParams, stmt->indexParams);
+    }
+    else {
+      Relation vlabelAttRelation = heap_open(VlabelAttrRelationId, RowExclusiveLock);
+      cqContext cqc;
+      int vattnum = caql_getcount(
+              NULL,
+              cql("SELECT COUNT(*) FROM skylon_vlabel_attribute "
+                " WHERE vlabelname = :1 AND schemaname = :2 "
+                  "AND rank > :3",
+                  CStringGetDatum(ele), CStringGetDatum(stmt->relation->schemaname), Int32GetDatum(0)));
+      char **attnames = palloc0(vattnum * sizeof(char*));
+      cqContext *pcqCtx = caql_beginscan(
+          caql_addrel(cqclr(&cqc), vlabelAttRelation),
+                        cql("SELECT * FROM skylon_vlabel_attribute "
+                          " WHERE vlabelname = :1 AND schemaname = :2 "
+                            "AND rank > :3",
+                            CStringGetDatum(ele), CStringGetDatum(stmt->relation->schemaname), Int32GetDatum(0)));
+      HeapTuple attributeTuple;
+      while (HeapTupleIsValid(attributeTuple = caql_getnext(pcqCtx))) {
+        Form_skylon_vlabel_attribute att = (Form_skylon_vlabel_attribute) GETSTRUCT(attributeTuple);
+        attnames[att->rank - 1] = pstrdup(NameStr(att->attrname));
+      }
+      caql_endscan(pcqCtx);
+      heap_close(vlabelAttRelation, RowExclusiveLock);
+      ListCell *cell;
+      foreach(cell, stmt->indexParams) {
+        IndexElem *ele = (IndexElem *)lfirst(cell);
+        for(int i = 0; i < vattnum; i++) {
+          if(strcmp(attnames[i], ele->name) == 0) {
+            Value *attnum = makeInteger(0);
+            attnum->val.ival = i + 1;
+            stmt->graphIndexAttnum = lappend(stmt->graphIndexAttnum, attnum);
+            break;
+          }
+        }
+      }
+      foreach(cell, stmt->indexIncludingParams) {
+        IndexElem *ele = (IndexElem *)lfirst(cell);
+        for(int i = 0; i < vattnum; i++) {
+          if(strcmp(attnames[i], ele->name) == 0) {
+            Value *attnum = makeInteger(0);
+            attnum->val.ival = i + 1;
+            stmt->graphIncludeAttnum = lappend(stmt->graphIncludeAttnum, attnum);
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
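
In short, when the indexed relation resolves to a graph edge label, the statement's key list is implicitly prefixed with the edge's endpoint key columns: source-side keys followed by destination-side keys in the normal case, and destination-side keys first when REVERSED is given (so, for example, CREATE INDEX i ON REVERSED g.knows (since) leads with the destination keys; the graph, edge, and column names here are illustrative). User-listed columns are appended after that prefix. For a vertex label, only the attribute-number bookkeeping in graphIndexAttnum and graphIncludeAttnum is filled in.
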
   /*
    * If the table already exists (i.e., this isn't a create table time
    * expansion of primary key() or unique()) and we're the ultimate parent
@@ -10133,6 +10555,9 @@ static Query *transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt) {
 
   /* setup database name for use of magma operations */
   MemoryContext oldContext = MemoryContextSwitchTo(MessageContext);
+
+  int isGraph = parseAndTransformAsGraph(pstate, stmt->relation);
+
   char *dbname = stmt->relation->catalogname;
   database =
       (dbname != NULL) ? pstrdup(dbname) : get_database_name(MyDatabaseId);
@@ -10141,7 +10566,9 @@ static Query *transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt) {
   qry->resultRelation = setTargetTable(
       pstate, stmt->relation, interpretInhOption(stmt->relation->inhOpt), true,
       ACL_UPDATE);
-
+  if(isGraph)
+    pstate->p_target_rangetblentry->graphName = stmt->relation->schemaname ?
+        pstrdup(stmt->relation->schemaname) : NULL;
   /*
    * the FROM clause is non-standard SQL syntax. We used to be able to do
    * this with REPLACE in POSTQUEL so we keep the feature.
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 17b3d29..ea4b0ce 100755
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -205,7 +205,7 @@ static Node *makeIsNotDistinctFromNode(Node *expr, int position);
 		CommentStmt ConstraintsSetStmt CopyStmt CreateAsStmt CreateCastStmt
 		CreateDomainStmt CreateExternalStmt CreateFileSpaceStmt CreateGroupStmt
 		CreateOpClassStmt CreatePLangStmt
-		CreateQueueStmt CreateSchemaStmt CreateSeqStmt CreateStmt 
+		CreateQueueStmt CreateSchemaStmt CreateSeqStmt CreateStmt CreateGraphStmt
 		CreateTableSpaceStmt CreateFdwStmt CreateForeignServerStmt CreateForeignStmt 
 		CreateAssertStmt CreateTrigStmt 
 		CreateUserStmt CreateUserMappingStmt CreateRoleStmt
@@ -351,7 +351,7 @@ static Node *makeIsNotDistinctFromNode(Node *expr, int position);
 %type <node>	overlay_placing substr_from substr_for
 
 %type <boolean> opt_instead opt_analyze
-%type <boolean> index_opt_unique opt_verbose opt_full
+%type <boolean> index_opt_unique opt_verbose opt_full index_opt_reverse
 %type <boolean> opt_freeze opt_default opt_ordered opt_recheck
 %type <boolean> opt_rootonly_all
 %type <boolean> opt_dxl
@@ -373,7 +373,7 @@ static Node *makeIsNotDistinctFromNode(Node *expr, int position);
 
 %type <vsetstmt> set_rest
 %type <node>	TableElement ExtTableElement ConstraintElem ExtConstraintElem TableFuncElement
-%type <node>	columnDef ExtcolumnDef
+%type <node>	columnDef ExtcolumnDef fromVlabel toVlabel
 %type <node>	cdb_string
 %type <defelt>	def_elem old_aggr_elem keyvalue_pair
 %type <node>	def_arg columnElem where_clause where_or_current_clause
@@ -443,7 +443,7 @@ static Node *makeIsNotDistinctFromNode(Node *expr, int position);
 %type <node>	TableConstraint ExtTableConstraint TableLikeClause 
 %type <list>	TableLikeOptionList
 %type <ival>	TableLikeOption
-%type <list>	ColQualList
+%type <list>	ColQualList LabelList
 %type <node>	ColConstraint ColConstraintElem ConstraintAttr
 %type <ival>	key_actions key_delete key_match key_update key_action
 %type <ival>	ConstraintAttributeSpec ConstraintDeferrabilitySpec
@@ -517,15 +517,15 @@ static Node *makeIsNotDistinctFromNode(Node *expr, int position);
 
 	DATA_P DATABASE DAY_P DEALLOCATE DEC DECIMAL_P DECLARE DECODE DEFAULT DEFAULTS
 	DEFERRABLE DEFERRED DEFINER DELETE_P DELIMITER DELIMITERS DENY
-	DESC DISABLE_P DISTINCT DISTRIBUTED DO DOMAIN_P DOUBLE_P DROP DXL
+	DESC DISABLE_P DISTINCT DISTRIBUTED DISCRIMINATOR DO DOMAIN_P DOUBLE_P DROP DXL
 
 	EACH ELSE ENABLE_P ENCODING ENCRYPTED END_P ENUM_P ERRORS ESCAPE EVERY EXCEPT 
-	EXCHANGE EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPLAIN EXTERNAL EXTRACT
+	EXCHANGE EXCLUDE EXCLUDING EXCLUSIVE EXECUTE EXISTS EXPLAIN EXTERNAL EXTRACT EDGE
 
 	FALSE_P FETCH FIELDS FILESPACE FILESYSTEM FILL FILTER FIRST_P FLOAT_P FOLLOWING FOR 
     	FORCE FOREIGN FORMAT FORMATTER FORWARD FREEZE FROM FULL FUNCTION
 
-	GB GLOBAL GRANT GRANTED GREATEST GROUP_P GROUP_ID GROUPING
+	GB GLOBAL GRANT GRANTED GREATEST GROUP_P GROUP_ID GROUPING GRAPH
 
 	HANDLER HASH HAVING HEADER_P HOLD HOST HOUR_P
 
@@ -563,7 +563,7 @@ static Node *makeIsNotDistinctFromNode(Node *expr, int position);
 	RANDOMLY RANGE READ READABLE READS REAL REASSIGN RECHECK RECURSIVE 
     REFERENCES REINDEX REJECT_P RELATIVE_P 
 	RELEASE RENAME REPEATABLE REPLACE RESET RESOURCE RESTART RESTRICT 
-	RETURNING RETURNS REVOKE RIGHT
+	RETURNING RETURNS REVERSED REVOKE RIGHT
 	ROLE ROLLBACK ROLLUP ROOTPARTITION ROW ROWS RULE
 
 	SAVEPOINT SCATTER SCHEMA SCROLL SEARCH SECOND_P 
@@ -583,7 +583,7 @@ static Node *makeIsNotDistinctFromNode(Node *expr, int position);
 	UPDATE USER USING
 
 	VACUUM VALID VALIDATION VALIDATOR VALUE_P VALUES VARCHAR VARYING
-	VERBOSE VERSION_P VIEW VOLATILE VARIADIC
+	VERBOSE VERSION_P VIEW VOLATILE VARIADIC VERTEX
 
 	WEB WHEN WHERE WINDOW WITH WITHIN WITHOUT WORK WRAPPER WRITABLE WRITE
 
@@ -1050,6 +1050,7 @@ stmt :
 			| CreateSchemaStmt
 			| CreateSeqStmt
 			| CreateStmt
+			| CreateGraphStmt
 			| CreateTableSpaceStmt
 			| CreateTrigStmt
 			| CreateRoleStmt
@@ -3302,12 +3303,12 @@ opt_using:
 			USING									{}
 			| /*EMPTY*/								{}
 		;
-
-
+ 
+ 
 /*****************************************************************************
  *
  *		QUERY :
- *				CREATE TABLE relname
+ *				CREATE TABLE/VERTEX/EDGE relname
  *
  *****************************************************************************/
 
@@ -3542,8 +3543,89 @@ CreateStmt:	CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')'
 
 					$$ = (Node *)n;
 				}
+				| CREATE VERTEX qualified_name '(' OptTableElementList ')'
+		  		{
+		  			CreateVlabelStmt *n = makeNode(CreateVlabelStmt);
+		  			n->relation = $3;
+		  			n->tableElts = $5;
+		  			n->constraints = NULL;
+					$$ = (Node *)n;
+				}
+		| CREATE EDGE qualified_name '(' fromVlabel ',' toVlabel ',' OptTableElementList ')'
+		  		{
+		  			CreateElabelStmt *n = makeNode(CreateElabelStmt);
+		  			n->relation = $3;
+		  			n->tableElts = $9;
+		  			n->constraints = NULL;
+		  			n->fromVlabel = $5;
+		  			n->toVlabel = $7;
+					$$ = (Node *)n;
+				}
+		| CREATE EDGE qualified_name '(' fromVlabel ',' toVlabel ')'
+		  		{
+		  			CreateElabelStmt *n = makeNode(CreateElabelStmt);
+		  			n->relation = $3;
+		  			n->tableElts = NULL;
+		  			n->constraints = NULL;
+		  			n->fromVlabel = $5;
+		  			n->toVlabel = $7;
+					$$ = (Node *)n;
+				}
+		;
+		
+CreateGraphStmt: CREATE GRAPH qualified_name '(' VERTEX '(' LabelList ')' ',' EDGE '(' LabelList ')' ')' FORMAT Sconst format_opt OptWith
+				{
+					CreateGraphStmt *n = makeNode(CreateGraphStmt);
+					n->graph = $3;
+					n->vlabels = $7;
+					n->elabels = $12;
+					n->options = $18;
+					n->format = $16;
+					$$ = (Node *)n;
+				}
+				| CREATE GRAPH qualified_name '(' VERTEX '(' LabelList ')' ',' EDGE '(' LabelList ')' ')' OptWith
+				{
+					CreateGraphStmt *n = makeNode(CreateGraphStmt);
+					n->graph = $3;
+					n->vlabels = $7;
+					n->elabels = $12;
+					n->format = pstrdup("magmaap");
+					n->options = $15;
+					$$ = (Node *)n;
+				}
+				| CREATE GRAPH qualified_name '(' VERTEX '(' LabelList ')' ')' OptWith
+				{
+					CreateGraphStmt *n = makeNode(CreateGraphStmt);
+					n->graph = $3;
+					n->vlabels = $7;
+					n->elabels = NULL;
+					n->format = pstrdup("magmaap");
+					n->options = $10;
+					$$ = (Node *)n;
+				}
+				| CREATE GRAPH qualified_name '(' VERTEX '(' LabelList ')' ')' FORMAT Sconst format_opt OptWith
+				{
+					CreateGraphStmt *n = makeNode(CreateGraphStmt);
+					n->graph = $3;
+					n->vlabels = $7;
+					n->elabels = NULL;
+					n->format = $11;
+					n->options = $13;
+					$$ = (Node *)n;
+				}
 		;
 
+LabelList:
+			ColId
+				{
+					$$ = list_make1(makeString($1));
+				}
+			| LabelList ',' ColId
+				{
+					$$ = lappend($1, makeString($3));
+				}
+		;
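
Together with the CreateStmt alternatives above, these productions accept statements of the form (label names illustrative): CREATE VERTEX person ( ... ), CREATE EDGE knows ( FROM person, TO person, ... ), and CREATE GRAPH g ( VERTEX (person), EDGE (knows) ) [FORMAT '...' ...] [WITH ( ... )]; when no FORMAT clause is given, the graph's format defaults to 'magmaap'.
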
+		
 /*
  * Redundancy here is needed to avoid shift/reduce conflicts,
  * since TEMP is not a reserved word.  See also OptTempTableName.
@@ -3617,6 +3699,20 @@ columnDef:	ColId Typename ColQualList opt_storage_encoding
 					$$ = (Node *)n;
 				}
 		;
+		
+fromVlabel:	FROM ColId
+				{
+					Value *n = makeString($2);					
+					$$ = (Node *)n;
+				}
+		;
+		
+toVlabel:	TO ColId
+				{
+					Value *n = makeString($2);					
+					$$ = (Node *)n;
+				}
+		;
 
 ColQualList:
 			ColQualList ColConstraint				{ $$ = lappend($1, $2); }
@@ -3899,6 +3995,18 @@ ConstraintElem:
 					n->indexspace = $8;
 					$$ = (Node *)n;
 				}
+			| DISCRIMINATOR '(' columnList ')' opt_definition OptConsTableSpace
+				{
+					Constraint *n = makeNode(Constraint);
+					n->contype = CONSTR_PRIMARY;
+					n->name = NULL;
+					n->raw_expr = NULL;
+					n->cooked_expr = NULL;
+					n->keys = $3;
+					n->options = $5;
+					n->indexspace = $6;
+					$$ = (Node *)n;
+				}
 			| FOREIGN KEY '(' columnList ')' REFERENCES qualified_name
 				opt_column_list key_match key_actions ConstraintAttributeSpec
 				{
@@ -4937,7 +5045,7 @@ format_opt:
 format_opt_list:
 			format_opt_item2
 			{
-				$$ = list_make1($1)
+				$$ = list_make1($1);
 			}
 			| format_opt_item2 '=' format_opt_item2
 			{
@@ -6327,7 +6435,9 @@ DropStmt:	DROP drop_type IF_P EXISTS any_name_list opt_drop_behavior
 					DropStmt *n = makeNode(DropStmt);
 					n->removeType = $2;
 					n->missing_ok = TRUE;
-					n->objects = $5;
+					List *list = $5;
+					ListCell *cell;
+					n->objects = list;
 					n->behavior = $6;
 					$$ = (Node *)n;
 				}
@@ -6336,7 +6446,9 @@ DropStmt:	DROP drop_type IF_P EXISTS any_name_list opt_drop_behavior
 					DropStmt *n = makeNode(DropStmt);
 					n->removeType = $2;
 					n->missing_ok = FALSE;
-					n->objects = $3;
+					List *list = $3;
+					ListCell *cell;
+					n->objects = list;
 					n->behavior = $4;
 					$$ = (Node *)n;
 				}
@@ -6358,6 +6470,9 @@ drop_type:	TABLE									{ $$ = OBJECT_TABLE; }
 			| TABLESPACE							{ $$ = OBJECT_TABLESPACE; }
 			| FOREIGN TABLE							{ $$ = OBJECT_FOREIGNTABLE; }
 			| PROTOCOL								{ $$ = OBJECT_EXTPROTOCOL; }
+			| VERTEX									{ $$ = OBJECT_VLABEL; }
+			| EDGE									{ $$ = OBJECT_ELABEL; }
+			| GRAPH									{ $$ = OBJECT_GRAPH; }
 		;
 
 any_name_list:
@@ -6977,20 +7092,40 @@ opt_granted_by: GRANTED BY RoleId						{ $$ = $3; }
  *****************************************************************************/
 
 IndexStmt:	CREATE index_opt_unique INDEX index_name
-			ON qualified_name access_method_clause '(' index_params ')'
+			ON index_opt_reverse qualified_name access_method_clause '(' index_params ')'
+			opt_include opt_definition OptTableSpace where_clause
+				{
+					IndexStmt *n = makeNode(IndexStmt);
+					n->unique = $2;
+					n->concurrent = false;
+					n->idxname = $4;
+					n->reverse = $6;
+					n->relation = $7;
+					n->accessMethod = $8;
+					n->indexParams = $10;
+					n->indexIncludingParams = $12;
+					n->options = $13;
+					n->tableSpace = $14;
+					n->whereClause = $15;
+					n->idxOids = NULL;
+					$$ = (Node *)n;
+				}
+			| CREATE index_opt_unique INDEX index_name
+			ON index_opt_reverse qualified_name access_method_clause
 			opt_include opt_definition OptTableSpace where_clause
 				{
 					IndexStmt *n = makeNode(IndexStmt);
 					n->unique = $2;
 					n->concurrent = false;
 					n->idxname = $4;
-					n->relation = $6;
-					n->accessMethod = $7;
-					n->indexParams = $9;
-					n->indexIncludingParams = $11;
-					n->options = $12;
-					n->tableSpace = $13;
-					n->whereClause = $14;
+					n->reverse = $6;
+					n->relation = $7;
+					n->accessMethod = $8;
+					n->indexParams = NULL;
+					n->indexIncludingParams = $9;
+					n->options = $10;
+					n->tableSpace = $11;
+					n->whereClause = $12;
 					n->idxOids = NULL;
 					$$ = (Node *)n;
 				}
@@ -7023,6 +7158,10 @@ IndexStmt:	CREATE index_opt_unique INDEX index_name
 					$$ = (Node *)n;
 				}
 		;
+		
+index_opt_reverse:
+			REVERSED									{ $$ = TRUE; }
+			| /*EMPTY*/								{ $$ = FALSE; }
+		;
 
 index_opt_unique:
 			UNIQUE									{ $$ = TRUE; }
@@ -13513,6 +13652,7 @@ reserved_keyword:
 			| DEFAULT
 			| DEFERRABLE
 			| DESC
+			| DISCRIMINATOR
 			| DISTINCT
 			| DISTRIBUTED /* gp */
 			| DO
@@ -13520,6 +13660,7 @@ reserved_keyword:
 			| END_P
 			| EXCEPT
 			| EXCLUDE 
+			| EDGE
 			| FALSE_P
 			| FETCH
 			| FILTER
@@ -13528,6 +13669,7 @@ reserved_keyword:
 			| FOREIGN
 			| FROM
 			| GRANT
+			| GRAPH
 			| GROUP_P
 			| HAVING
 			| IN_P
@@ -13555,6 +13697,7 @@ reserved_keyword:
 			| RANGE
 			| REFERENCES
 			| RETURNING
+			| REVERSED
 			| ROWS
 			| SCATTER  /* gp */
 			| SELECT
@@ -13572,6 +13715,7 @@ reserved_keyword:
 			| USER
 			| USING
 			| VARIADIC
+			| VERTEX
 			| WINDOW
 			| WITH
 			| WHEN
@@ -14207,6 +14351,78 @@ makeIsNotDistinctFromNode(Node *expr, int position)
 	return n;
 }
 
+char *fromVlabelColName(char *vname)
+{
+	char extraname[] = "from_";
+	int len1 = strlen(vname);
+	int len2 = strlen(extraname);
+	char * newname = palloc0(len1+len2+1);
+	memcpy(newname,extraname,len2);
+	memcpy(newname+len2,vname,len1);
+	newname[len1 + len2] = '\0';
+	return newname;
+}
+
+char *toVlabelColName(char *vname)
+{
+	char extraname[] = "to_";
+	int len1 = strlen(vname);
+	int len2 = strlen(extraname);
+	char * newname = palloc0(len1+len2+1);
+	memcpy(newname,extraname,len2);
+	memcpy(newname+len2,vname,len1);
+	newname[len1 + len2] = '\0';
+	return newname;
+}
+
+char *vlabelName(char *vname)
+{
+	char extraname[] = "graph_vlabel_";
+	int len1 = strlen(vname);
+	int len2 = strlen(extraname);
+	char * newname = palloc0(len1+len2+1);
+	memcpy(newname,extraname,len2);
+	memcpy(newname+len2,vname,len1);
+	newname[len1 + len2] = '\0';
+	return newname;
+}
+
+char *elabelName(char *ename)
+{
+	char extraname[] = "graph_elabel_";
+	int len1 = strlen(ename);
+	int len2 = strlen(extraname);
+	char * newname = palloc0(len1+len2+1);
+	memcpy(newname,extraname,len2);
+	memcpy(newname+len2,ename,len1);
+	newname[len1 + len2] = '\0';
+	return newname;
+}
+
+char *graphVertexName(char *gname)
+{
+  char extraname[] = "graph_vertex_";
+  int len1 = strlen(gname);
+  int len2 = strlen(extraname);
+  char * newname = palloc0(len1+len2+1);
+  memcpy(newname,extraname,len2);
+  memcpy(newname+len2,gname,len1);
+  newname[len1 + len2] = '\0';
+  return newname;
+}
+
+char *graphEdgeName(char *gname)
+{
+  char extraname[] = "graph_edge_";
+  int len1 = strlen(gname);
+  int len2 = strlen(extraname);
+  char * newname = palloc0(len1+len2+1);
+  memcpy(newname,extraname,len2);
+  memcpy(newname+len2,gname,len1);
+  newname[len1 + len2] = '\0';
+  return newname;
+}
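
The six helpers above differ only in the literal prefix they prepend ("from_", "to_", "graph_vlabel_", "graph_elabel_", "graph_vertex_", "graph_edge_"). A minimal sketch of a shared routine that would express the same thing; the name is illustrative, not part of the patch:

static char *
prefix_name(const char *prefix, const char *name)
{
	/* palloc0 zero-fills, so the copied result stays NUL-terminated. */
	char *newname = palloc0(strlen(prefix) + strlen(name) + 1);

	strcpy(newname, prefix);
	strcat(newname, name);
	return newname;
}
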
+
 /*
  * Must undefine base_yylex before including scan.c, since we want it
  * to create the function base_yylex not filtered_base_yylex.
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index dc08457..da7a2e1 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -49,6 +49,13 @@
 #include "catalog/pg_proc.h"
 #include "catalog/pg_type.h"
 #include "catalog/pg_window.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph_elabel.h"
 #include "commands/defrem.h"
 #include "nodes/makefuncs.h"
 #include "nodes/print.h" /* XXX: remove after debugging !! */
@@ -86,6 +93,10 @@ static const char *clauseText[] = {
     "DISTINCT ON"
 };
 
+extern char *graphVertexTableName(char *gname,char *vname);
+
+extern char *graphEdgeTableName(char *gname,char *ename);
+
 static void extractRemainingColumns(List *common_colnames,
 						List *src_colnames, List *src_colvars,
 						List **res_colnames, List **res_colvars);
@@ -123,6 +134,8 @@ static List *transformRowExprToGroupClauses(ParseState *pstate, RowExpr *rowexpr
 											List *groupsets, List *targetList);
 static void freeGroupList(List *grouplist);
 
+extern char* findGraphSchema(char *graph);
+
 typedef struct grouping_rewrite_ctx
 {
 	List *grp_tles;
@@ -1410,7 +1423,170 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
 	return rte;
 }
 
+List *transformAsGraphName(ParseState *pstate, RangeVar *rangeVar) {
+  if(!rangeVar) return NULL;
+  char *catalog = rangeVar->catalogname;
+  char *schema = rangeVar->schemaname;
+  char *graph = rangeVar->relname;
+
+  if(schema) {
+    Relation skylon_graph_rel = heap_open(GraphRelationId, RowExclusiveLock);
+    cqContext cqc;
+    Oid dboid = GetCatalogId(catalog);
+    Oid namespaceId = LookupNamespaceId(schema, dboid);
+    if (0 == caql_getcount(
+          caql_addrel(cqclr(&cqc), skylon_graph_rel),
+          cql("SELECT COUNT(*) FROM skylon_graph "
+            " WHERE graphname = :1 AND schemaname = :2",
+            CStringGetDatum(graph), CStringGetDatum(schema)))){
+      heap_close(skylon_graph_rel, RowExclusiveLock);
+      return NULL;
+    }
+    heap_close(skylon_graph_rel, RowExclusiveLock);
+  }
+  else {
+    schema = findGraphSchema(graph);
+    if(!schema)
+      return NULL;
+  }
+  List *list = NIL;
+  {
+    Relation  skylon_graph_vlabel_rel;
+    cqContext cqc;
+
+    skylon_graph_vlabel_rel = heap_open(GraphVlabelRelationId, RowExclusiveLock);
+    cqContext *pcqCtx = caql_beginscan(
+        caql_addrel(cqclr(&cqc), skylon_graph_vlabel_rel),
+        cql("SELECT * FROM skylon_graph_vlabel "
+                        " WHERE graphname = :1 AND schemaname = :2",
+                        CStringGetDatum(graph), CStringGetDatum(schema)));
+    HeapTuple vlabelTuple = NULL;
+    while (HeapTupleIsValid(vlabelTuple = caql_getnext(pcqCtx))) {
+      Form_skylon_graph_vlabel tuple = (Form_skylon_graph_vlabel) GETSTRUCT(vlabelTuple);
+      RangeVar   *vlabel = makeRangeVar(NULL, NULL, NULL, -1);
+      char *vname = pstrdup(NameStr(tuple->vlabelname));
+      vlabel->catalogname = catalog;
+      vlabel->schemaname = schema;
+      vlabel->relname = graphVertexTableName(graph, vname);
+      list = lappend(list, vlabel);
+    }
+    caql_endscan(pcqCtx);
+    heap_close(skylon_graph_vlabel_rel, RowExclusiveLock);
+  }
+  {
+    Relation  skylon_graph_elabel_rel;
+    cqContext cqc;
+
+    skylon_graph_elabel_rel = heap_open(GraphElabelRelationId, RowExclusiveLock);
+    cqContext *pcqCtx = caql_beginscan(
+        caql_addrel(cqclr(&cqc), skylon_graph_elabel_rel),
+        cql("SELECT * FROM skylon_graph_elabel "
+                        " WHERE graphname = :1 AND schemaname = :2",
+                        CStringGetDatum(graph), CStringGetDatum(schema)));
+    HeapTuple elabelTuple = NULL;
+    while (HeapTupleIsValid(elabelTuple = caql_getnext(pcqCtx))) {
+      Form_skylon_graph_elabel tuple = (Form_skylon_graph_elabel) GETSTRUCT(elabelTuple);
+      RangeVar   *elabel = makeRangeVar(NULL, NULL, NULL, -1);
+      char *ename = pstrdup(NameStr(tuple->elabelname));
+      elabel->catalogname = catalog;
+      elabel->schemaname = schema;
+      elabel->relname = graphEdgeTableName(graph, ename);
+      list = lappend(list, elabel);
+    }
+    caql_endscan(pcqCtx);
+    heap_close(skylon_graph_elabel_rel, RowExclusiveLock);
+  }
+  return list;
+}
 
+int parseAndTransformAsGraph(ParseState *pstate, RangeVar *rangeVar) {
+  if(!rangeVar) return false;
+  if (rangeVar->schemaname == NULL)
+    return false;
+  char *catalog = NULL;
+  char *schema = rangeVar->catalogname;
+  char *graph = rangeVar->schemaname;
+  char *ele = rangeVar->relname;
+
+  Oid dboid = GetCatalogId(catalog);
+  Oid namespaceId = LookupNamespaceId(rangeVar->schemaname, dboid);
+  Oid relId=OidIsValid(namespaceId)
+      ? get_relname_relid(rangeVar->relname, namespaceId)
+      : InvalidOid;
+
+  int res = 0;
+  if(!OidIsValid(relId)){
+    RangeVar *tmpRel = makeNode(RangeVar);
+    tmpRel->schemaname = schema;
+    schema = get_namespace_name(RangeVarGetCreationNamespace(tmpRel));
+    cqContext cqc;
+    Relation VlabelRelation = heap_open(GraphVlabelRelationId, RowExclusiveLock);
+    HeapTuple vlabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), VlabelRelation),
+                                         cql("SELECT * FROM skylon_graph_vlabel"
+        " WHERE vlabelname = :1 "
+        "AND graphname = :2 AND schemaname = :3",
+        CStringGetDatum(ele),
+        CStringGetDatum(graph), CStringGetDatum(schema)));
+    if(HeapTupleIsValid(vlabelTuple)) {
+      rangeVar->catalogname = NULL;
+      rangeVar->schemaname = schema;
+      rangeVar->relname = graphVertexTableName(graph, ele);
+      res = 1;
+    }
+    else {
+      Relation ElabelRelation = heap_open(GraphElabelRelationId, RowExclusiveLock);
+      HeapTuple elabelTuple= caql_getfirst(caql_addrel(cqclr(&cqc), ElabelRelation),
+                                           cql("SELECT * FROM skylon_graph_elabel"
+          " WHERE elabelname = :1 "
+          "AND graphname = :2 AND schemaname = :3",
+          CStringGetDatum(ele),
+          CStringGetDatum(graph), CStringGetDatum(schema)));
+      if(!HeapTupleIsValid(elabelTuple)) {
+        heap_close(VlabelRelation, RowExclusiveLock);
+        heap_close(ElabelRelation, RowExclusiveLock);
+        return 0;
+      }
+      rangeVar->catalogname = NULL;
+      rangeVar->schemaname = schema;
+      rangeVar->relname = graphEdgeTableName(graph, ele);
+      res = 2;
+      heap_close(ElabelRelation, RowExclusiveLock);
+    }
+    heap_close(VlabelRelation, RowExclusiveLock);
+    Oid graphId = caql_getoid_only(
+        NULL,
+        NULL,
+        cql("SELECT oid FROM pg_class "
+          " WHERE relname = :1 and relnamespace = :2",
+          CStringGetDatum(graph),
+          ObjectIdGetDatum(LookupNamespaceId(schema, dboid))));
+    if(pstate) {
+      if(pstate->graphEntry) {
+        ListCell *cell;
+        bool hit = false;
+        foreach(cell, pstate->graphEntry)
+        {
+          GraphEntry *n = (GraphEntry*)lfirst(cell);
+          if(n->relid == graphId) {
+            hit = true;
+            break;
+          }
+        }
+        if(!hit) {
+          GraphEntry *node = makeNode(GraphEntry);
+          node->relid = graphId;
+          pstate->graphEntry = lappend(pstate->graphEntry, node);
+        }
+      }
+      else {
+        GraphEntry *node = makeNode(GraphEntry);
+        node->relid = graphId;
+        pstate->graphEntry = lappend(pstate->graphEntry, node);
+      }
+    }
+  }
+  return res;
+}
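
parseAndTransformAsGraph() returns an int encoding what was matched: 0 when the RangeVar is not a graph label (it is left untouched), 1 when it was rewritten to a vertex backing table via graphVertexTableName(), and 2 when it was rewritten to an edge backing table via graphEdgeTableName(); transformIndexStmt() relies on the edge case through its "isGraph == 2" check. A minimal sketch of that contract as an enum (the type and constant names are illustrative, not part of the patch):

typedef enum GraphRewriteResult {
  GRAPH_REWRITE_NONE = 0,   /* not a graph label; rangeVar untouched */
  GRAPH_REWRITE_VERTEX = 1, /* rewritten to the vertex backing table */
  GRAPH_REWRITE_EDGE = 2    /* rewritten to the edge backing table */
} GraphRewriteResult;
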
 /*
  * transformFromClauseItem -
  *	  Transform a FROM-clause item, adding any required entries to the
@@ -1459,6 +1635,10 @@ transformFromClauseItem(ParseState *pstate, Node *n,
 		RangeTblEntry *rte = NULL;
 		int	rtindex;
 		RangeVar *rangeVar = (RangeVar *)n;
+		char *graph = rangeVar->schemaname ? pstrdup(rangeVar->schemaname) : NULL;
+
+		/* First try to interpret the range var as a graph vertex/edge label. */
+		int isGraph = parseAndTransformAsGraph(pstate, rangeVar);
 
 		/*
 		 * If it is an unqualified name, it might be a CTE reference.
@@ -1480,7 +1660,8 @@ transformFromClauseItem(ParseState *pstate, Node *n,
 		{
 			rte = transformTableEntry(pstate, rangeVar);
 		}
-		
+		if(isGraph)
+		  rte->graphName = graph;
 		/* assume new rte is at end */
 		rtindex = list_length(pstate->p_rtable);
 		Assert(rte == rt_fetch(rtindex, pstate->p_rtable));
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 923e75b..0f2958c 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -15,8 +15,16 @@
 
 #include "postgres.h"
 
+#include "catalog/catquery.h"
 #include "catalog/namespace.h"
 #include "catalog/pg_type.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph_elabel.h"
 #include "commands/dbcommands.h"
 #include "mb/pg_wchar.h"
 #include "miscadmin.h"
@@ -41,6 +49,10 @@
 
 bool		Transform_null_equals = false;
 
+extern char *graphVertexTableName(char *gname,char *vname);
+
+extern char *graphEdgeTableName(char *gname,char *ename);
+
 static Node *transformParamRef(ParseState *pstate, ParamRef *pref);
 static Node *transformAExprOp(ParseState *pstate, A_Expr *a);
 static Node *transformAExprAnd(ParseState *pstate, A_Expr *a);
@@ -173,6 +185,7 @@ transformExpr(ParseState *pstate, Node *expr)
 			{
 				A_Expr	   *a = (A_Expr *) expr;
 
+				a->lexpr;
 				switch (a->kind)
 				{
 					case AEXPR_OP:
@@ -643,6 +656,7 @@ transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
 
 	return result;
 }
+extern int parseAndTransformAsGraph(ParseState *pstate, RangeVar *rangeVar);
 
 static Node *
 transformColumnRef(ParseState *pstate, ColumnRef *cref)
@@ -768,12 +782,29 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
 				char	   *name1 = strVal(linitial(cref->fields));
 				char	   *name2 = strVal(lsecond(cref->fields));
 				char	   *name3 = strVal(lthird(cref->fields));
+				char     *graphName = name1 ? pstrdup(name1) : NULL;
+				RangeVar *rangeVar = makeNode(RangeVar);
+				rangeVar->catalogname = NULL;
+				rangeVar->schemaname = name1;
+				rangeVar->relname = name2;
+				int isGraph = parseAndTransformAsGraph(pstate, rangeVar);
+				if(isGraph) {
+				  name1 = rangeVar->schemaname;
+				  name2 = rangeVar->relname;
+				}
 
 				/* Whole-row reference? */
 				if (strcmp(name3, "*") == 0)
 				{
 					node = transformWholeRowRef(pstate, NULL /*catalogname*/, name1, name2,
 												cref->location);
+					if (node != NULL) {
+						RangeTblEntry *rte =  GetRTEByRangeTablePosn(pstate,
+								((Var*)node)->varno,
+								((Var*)node)->varlevelsup);
+						if(isGraph)
+						  rte->graphName = graphName;
+					}
 					break;
 				}
 
@@ -791,6 +822,13 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
 											 NIL, false, false, false, true, NULL,
 											 cref->location, NULL);
 				}
+				if (node != NULL) {
+					RangeTblEntry *rte =  GetRTEByRangeTablePosn(pstate,
+									((Var*)node)->varno,
+									((Var*)node)->varlevelsup);
+					if(isGraph)
+					  rte->graphName = graphName;
+				}
 				break;
 			}
 		case 4:
@@ -799,7 +837,16 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
 				char	   *name2 = strVal(lsecond(cref->fields));
 				char	   *name3 = strVal(lthird(cref->fields));
 				char	   *name4 = strVal(lfourth(cref->fields));
-
+        RangeVar *rangeVar = makeNode(RangeVar);
+        rangeVar->catalogname = name1;
+        rangeVar->schemaname = name2;
+        rangeVar->relname = name3;
+        bool isGraph = parseAndTransformAsGraph(pstate, rangeVar);
+        if(isGraph) {
+          name1 = rangeVar->catalogname;
+          name2 = rangeVar->schemaname;
+          name3 = rangeVar->relname;
+        }
 				/* Whole-row reference? */
 				if (strcmp(name4, "*") == 0)
 				{
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 06c369b..e7227f5 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -2852,12 +2852,29 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
 	 */
 	if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
 			!= requiredPerms) {
+		bool isgraph = false;
 		/*
 		 * If the table is a partition, return an error message that includes
 		 * the name of the parent table.
 		 */
 		const char *rel_name = get_rel_name_partition(relOid);
-		aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, rel_name);
+		if (0 != caql_getcount(NULL,
+							cql("SELECT COUNT(*) FROM skylon_graph "
+							" WHERE graphname = :1 ",
+							CStringGetDatum(rel_name)))) {
+			isgraph = true;
+		}
+		else if (NULL != rte->graphName) {
+			if (0 != caql_getcount(NULL,
+								cql("SELECT COUNT(*) FROM skylon_graph "
+								" WHERE graphname = :1 ",
+								CStringGetDatum(rte->graphName)))) {
+				isgraph = true;
+				rel_name = rte->graphName;
+			}
+		}
+		aclcheck_error(ACLCHECK_NO_PRIV,
+									 isgraph ? ACL_KIND_CLASS_GRAPH : ACL_KIND_CLASS, rel_name);
 	}
 }
 
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 03858a1..7b9f907 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -134,7 +134,6 @@
 #include "resourcemanager/resourceenforcer/resourceenforcer.h"
 
 #include "storage/cwrapper/hdfs-file-system-c.h"
-#include "scheduler/cwrapper/scheduler-c.h"
 #include "magma/cwrapper/magma-client-c.h"
 
 #include "pg_stat_activity_history_process.h"
@@ -3479,9 +3478,6 @@ die(SIGNAL_ARGS)
 		MagmaClientC_CancelMagmaClient();
 		MagmaFormatC_CancelMagmaClient();
 
-		if (MyScheduler != NULL)
-		  SchedulerCancelQuery(MyScheduler);
-
 		if (MyNewExecutor != NULL)
 			MyExecutorSetCancelQuery(MyNewExecutor);
 
@@ -3558,9 +3554,6 @@ StatementCancelHandler(SIGNAL_ARGS)
 		MagmaClientC_CancelMagmaClient();
 		MagmaFormatC_CancelMagmaClient();
 
-		if (MyScheduler != NULL)
-		  SchedulerCancelQuery(MyScheduler);
-
 		if (MyNewExecutor != NULL)
 			MyExecutorSetCancelQuery(MyNewExecutor);
 
@@ -5225,6 +5218,7 @@ PostgresMain(int argc, char *argv[], const char *username)
 					else {
 						if (serializedCommonPlanLen > 0) {
 							exec_mpp_query_new(
+							    NULL,
 									serializedCommonPlan,
 									serializedCommonPlanLen,
 									localSlice, true, NULL, NULL);
@@ -5447,6 +5441,7 @@ PostgresMain(int argc, char *argv[], const char *username)
 
                     if (serializedCommonPlanLen > 0) {
                       exec_mpp_query_new(
+                          NULL,
                           serializedCommonPlan,
                           serializedCommonPlanLen,
                           currentSliceId, true, NULL, NULL);
@@ -5655,19 +5650,6 @@ PostgresMain(int argc, char *argv[], const char *username)
 					close_target = pq_getmsgstring(&input_message);
 					pq_getmsgend(&input_message);
 
-					// stop scheduler
-					if (MyScheduler) {
-					  SchedulerEnd(MyScheduler);
-					  SchedulerCatchedError *err = SchedulerGetLastError(MyScheduler);
-					  if (err->errCode != ERRCODE_SUCCESSFUL_COMPLETION) {
-					    int errCode = err->errCode;
-					    SchedulerCleanUp(MyScheduler);
-					    ereport(WARNING, (errcode(errCode),
-					        errmsg("failed to stop scheduler. %s (%d)",
-					               err->errMessage, errCode)));
-					  }
-					}
-
 					switch (close_type)
 					{
 						case 'S':
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 81d4635..586634b 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -14,6 +14,8 @@
  *
  *-------------------------------------------------------------------------
  */
+
+#include <string.h>
 #include "postgres.h"
 #include "port.h"
 
@@ -678,6 +680,21 @@ ProcessDropStatement(DropStmt *stmt)
 				ereport(ERROR,
 						(errcode(ERRCODE_CDB_FEATURE_NOT_YET), errmsg("Cannot support drop foreign table statement yet") ));
 
+			case OBJECT_VLABEL:
+			  rel = makeRangeVarFromNameList(names);
+			  RemoveVlabel(rel, stmt->missing_ok);
+			  break;
+
+			case OBJECT_ELABEL:
+        rel = makeRangeVarFromNameList(names);
+        RemoveElabel(rel, stmt->missing_ok);
+        break;
+
+      case OBJECT_GRAPH:
+        rel = makeRangeVarFromNameList(names);
+        RemoveGraph(rel, stmt->missing_ok, true);
+        break;
+
 			case OBJECT_TABLE:
 			case OBJECT_EXTTABLE:
 
@@ -1059,6 +1076,18 @@ ProcessUtility(Node *parsetree,
 			DefineExternalRelation((CreateExternalStmt *) parsetree);
 			break;
 
+		case T_CreateVlabelStmt:
+      DefineVlabel((CreateVlabelStmt *) parsetree);
+      break;
+
+		case T_CreateElabelStmt:
+      DefineElabel((CreateElabelStmt *) parsetree);
+      break;
+
+    case T_CreateGraphStmt:
+      DefineGraph((CreateGraphStmt *) parsetree);
+      break;
+
 		case T_CreateForeignStmt:
 			ereport(ERROR,
 							(errcode(ERRCODE_CDB_FEATURE_NOT_YET), errmsg("Cannot support create foreign statement yet") ));
@@ -1831,6 +1860,9 @@ ProcessUtility(Node *parsetree,
 					case OBJECT_INDEX:
 						ReindexIndex(stmt);
 						break;
+		      case OBJECT_VLABEL:
+		      case OBJECT_ELABEL:
+		      case OBJECT_GRAPH:
 					case OBJECT_TABLE:
 						ReindexTable(stmt);
 						break;
@@ -2157,6 +2189,18 @@ CreateCommandTag(Node *parsetree)
 			}
 			break;
 
+    case T_CreateVlabelStmt:
+      tag = "CREATE VERTEX";
+      break;
+
+    case T_CreateElabelStmt:
+      tag = "CREATE EDGE";
+      break;
+
+    case T_CreateGraphStmt:
+      tag = "CREATE GRAPH";
+      break;
+
 		case T_CreateForeignStmt:
 			tag = "CREATE FOREIGN TABLE";
 			break;
@@ -2208,6 +2252,15 @@ CreateCommandTag(Node *parsetree)
 		case T_DropStmt:
 			switch (((DropStmt *) parsetree)->removeType)
 			{
+			  case OBJECT_VLABEL:
+          tag = "DROP VERTEX";
+          break;
+			  case OBJECT_ELABEL:
+          tag = "DROP EDGE";
+          break;
+        case OBJECT_GRAPH:
+          tag = "DROP GRAPH";
+          break;
 				case OBJECT_TABLE:
 					tag = "DROP TABLE";
 					break;
@@ -2802,6 +2855,9 @@ GetCommandLogLevel(Node *parsetree)
 			lev = LOGSTMT_DDL;
 			break;
 
+		case T_CreateVlabelStmt:
+		case T_CreateElabelStmt:
+		case T_CreateGraphStmt:
 		case T_CreateExternalStmt:
 		case T_CreateForeignStmt:
 			lev = LOGSTMT_DDL;
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 9cd07ad..9fbccad 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -1675,18 +1675,14 @@ get_relname_relid(const char *relname, Oid relnamespace)
 	Oid			result;
 	int			fetchCount;
 
-	result = caql_getoid_plus(
-			NULL,
-			&fetchCount,
-			NULL,
-			cql("SELECT oid FROM pg_class "
-				" WHERE relname = :1 "
-				" AND relnamespace = :2 ",
-				PointerGetDatum((char *) relname),
-				ObjectIdGetDatum(relnamespace)));
-
-	if (!fetchCount)
-		return InvalidOid;
+	result = caql_getoid_only(
+      NULL,
+      NULL,
+      cql("SELECT oid FROM pg_class "
+        " WHERE relname = :1 "
+        " AND relnamespace = :2 ",
+        CStringGetDatum(relname),
+        ObjectIdGetDatum(relnamespace)));
 
 	return result;
 }
@@ -3654,6 +3650,38 @@ get_relation_keys(Oid relid)
 }
 
 /*
+ * Is there an index on the relation with the given relid
+ */
+bool rel_has_index(Oid relid)
+{
+	if (!rel_is_partitioned(relid))
+		return (caql_getcount(
+				NULL, cql("SELECT COUNT(*) FROM pg_index "
+						" WHERE indrelid = :1 ",
+						ObjectIdGetDatum(relid))) > 0);
+	else
+	{
+		List *children = find_all_inheritors(relid);
+		ListCell *child;
+		foreach (child, children)
+		{
+			Oid myrelid = lfirst_oid(child);
+			bool hasidx = (caql_getcount(
+					NULL, cql("SELECT COUNT(*) FROM pg_index "
+							" WHERE indrelid = :1 ",
+							ObjectIdGetDatum(myrelid))) > 0);
+			if (hasidx)
+			{
+				list_free(children);
+				return true;
+			}
+		}
+		list_free(children);
+		return false;
+	}
+}
+
+/*
  * check_constraint_exists
  *	  Is there a check constraint with the given oid
  */
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 986eb68..9a44514 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -26,7 +26,6 @@
 #include "executor/cwrapper/executor-c.h"
 #include "executor/cwrapper/cached-result.h"
 #include "magma/cwrapper/magma-client-c.h"
-#include "scheduler/cwrapper/scheduler-c.h"
 
 ProtocolVersion FrontendProtocol = PG_PROTOCOL_LATEST;
 
@@ -167,5 +166,4 @@ const char *sql_text;
 bool is_qtype_sql = false;
 
 ExecutorC	*MyNewExecutor;
-SchedulerC *MyScheduler = NULL;
 CachedResultC *MyCachedResult = NULL;
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 2cf0c8a..a56a59c 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -245,7 +245,6 @@ static const char *assign_timezone_abbreviations(const char *newval, bool doit,
 static const char *assign_new_interconnect_type(const char *newval, bool doit,GucSource source);
 static const char *assign_new_executor_mode(const char *newval, bool doit, GucSource source);
 static const char *assign_new_executor_runtime_filter_mode(const char *newval, bool doit, GucSource source);
-static const char *assign_new_scheduler_mode(const char *newval, bool doit, GucSource source);
 static const char *assign_switch_mode(const char *newval, bool doit, GucSource source);
 
 static bool assign_tcp_keepalives_idle(int newval, bool doit, GucSource source);
@@ -675,7 +674,7 @@ bool		enable_magma_seqscan = true;
 bool		enable_magma_bitmapscan = false;
 bool		enable_magma_indexonlyscan = false;
 bool		enable_orc_indexscan = false;
-bool		enable_orc_indexonlyscan = false;
+bool		enable_orc_indexonlyscan = true;
 bool		force_bitmap_table_scan = false;
 bool		enable_tidscan = true;
 bool		enable_sort = true;
@@ -1159,7 +1158,7 @@ static struct config_bool ConfigureNamesBool[] =
 			NULL
 		},
 		&enable_orc_indexonlyscan,
-		false, NULL, NULL
+		true, NULL, NULL
 	},
 	{
 		{"enable_orc_indexscan", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -3741,7 +3740,7 @@ static struct config_bool ConfigureNamesBool[] =
 			GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE
 		},
 		&optimizer_enable_indexjoin,
-		true, NULL, NULL
+		false, NULL, NULL
 	},
 	{
 		{"optimizer_enable_motions_masteronly_queries", PGC_USERSET, DEVELOPER_OPTIONS,
@@ -7540,16 +7539,6 @@ static struct config_string ConfigureNamesString[] =
 	},
 
 	{
-		{"new_scheduler", PGC_USERSET, EXTERNAL_TABLES,
-			gettext_noop("Enable the new scheduler."),
-			gettext_noop("Valid values are \"OFF\" and \"ON\"."),
-			GUC_NOT_IN_SAMPLE | GUC_NO_SHOW_ALL
-		},
-		&new_scheduler_mode,
-		"OFF", assign_new_scheduler_mode, NULL
-	},
-
-	{
 		{"dispatch_udf", PGC_USERSET, EXTERNAL_TABLES,
 			gettext_noop("specified UDF to be dispatched."),
 			gettext_noop("Valid separator is \",\""),
@@ -14051,14 +14040,6 @@ static const char *assign_new_executor_runtime_filter_mode(const char *newval, b
 		return newval;				/* OK */
 }
 
-static const char *assign_new_scheduler_mode(const char *newval, bool doit, GucSource source) {
-		if (pg_strcasecmp(newval, new_scheduler_mode_on) != 0
-				&& pg_strcasecmp(newval, new_scheduler_mode_off) != 0) {
-			return NULL;			/* fail */
-		}
-		return newval;				/* OK */
-}
-
 static const char *assign_switch_mode(const char *newval, bool doit,
                                             GucSource source) {
   if (pg_strcasecmp(newval, "ON") != 0 && pg_strcasecmp(newval, "OFF") != 0) {
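With the scheduler wiring removed, the remaining behavioural change in guc.c is that enable_orc_indexonlyscan now defaults to on and optimizer_enable_indexjoin now defaults to off. As a minimal sketch of what that means for a session (assuming a server built with this patch, and using only the GUC names visible in the hunk above), the new defaults can be inspected and overridden with ordinary GUC commands:

    SHOW enable_orc_indexonlyscan;       -- expected to report: on
    SET enable_orc_indexonlyscan = off;  -- restore the previous behaviour for this session only
    SHOW optimizer_enable_indexjoin;     -- expected to report: off
    -- SET new_scheduler = 'ON';         -- removed above, so it should now be rejected as unrecognized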
diff --git a/src/backend/utils/mmgr/Makefile b/src/backend/utils/mmgr/Makefile
index b117da9..5e8df67 100644
--- a/src/backend/utils/mmgr/Makefile
+++ b/src/backend/utils/mmgr/Makefile
@@ -11,6 +11,8 @@
 subdir = src/backend/utils/mmgr
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
+override CPPFLAGS := -I$(top_srcdir)/src/include/catalog $(CPPFLAGS)
+override CPPFLAGS := -I include $(CPPFLAGS)
 
 OBJS =  aset.o asetDirect.o mcxt.o memaccounting.o mpool.o portalmem.o memprot.o vmem_tracker.o redzone_handler.o runaway_cleaner.o idle_tracker.o event_version.o
 
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index af5f797..24cccce 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -322,6 +322,51 @@ static const SchemaQuery Query_for_list_of_tables = {
 	NULL
 };
 
+static const SchemaQuery Query_for_list_of_graphs = {
+  /* catname */
+  "pg_catalog.pg_class c",
+  /* selcondition */
+  "c.relname IN (SELECT graphname FROM skylon_graph)",
+  /* viscondition */
+  "pg_catalog.pg_table_is_visible(c.oid)",
+  /* namespace */
+  "c.relnamespace",
+  /* result */
+  "pg_catalog.quote_ident(c.relname)",
+  /* qualresult */
+  NULL
+};
+
+static const SchemaQuery Query_for_list_of_vertices = {
+  /* catname */
+  "pg_catalog.pg_class c",
+  /* selcondition */
+  "c.relname IN (SELECT vlabelname FROM skylon_vlabel)",
+  /* viscondition */
+  "pg_catalog.pg_table_is_visible(c.oid)",
+  /* namespace */
+  "c.relnamespace",
+  /* result */
+  "pg_catalog.quote_ident(c.relname)",
+  /* qualresult */
+  NULL
+};
+
+static const SchemaQuery Query_for_list_of_edges = {
+  /* catname */
+  "pg_catalog.pg_class c",
+  /* selcondition */
+  "c.relname IN (SELECT elabelname FROM skylon_elabel)",
+  /* viscondition */
+  "pg_catalog.pg_table_is_visible(c.oid)",
+  /* namespace */
+  "c.relnamespace",
+  /* result */
+  "pg_catalog.quote_ident(c.relname)",
+  /* qualresult */
+  NULL
+};
+
 static const SchemaQuery Query_for_list_of_tisv = {
 	/* catname */
 	"pg_catalog.pg_class c",
@@ -557,8 +602,10 @@ static const pgsql_thing_t words_after_create[] = {
 	{"CONFIGURATION", Query_for_list_of_ts_configurations, NULL, true},
 	{"DATABASE", Query_for_list_of_databases},
 	{"DICTIONARY", Query_for_list_of_ts_dictionaries, NULL, true},
+	{"EDGE", NULL, &Query_for_list_of_edges},
 	{"FOREIGN DATA WRAPPER", NULL, NULL},
 	{"FUNCTION", NULL, &Query_for_list_of_functions},
+	{"GRAPH", NULL, &Query_for_list_of_graphs},
 	{"GROUP", Query_for_list_of_roles},
 	{"LANGUAGE", Query_for_list_of_languages},
 	{"INDEX", NULL, &Query_for_list_of_indexes},
@@ -578,6 +625,7 @@ static const pgsql_thing_t words_after_create[] = {
 	{"UNIQUE", NULL, NULL},		/* for CREATE UNIQUE INDEX ... */
 	{"USER", Query_for_list_of_roles},
 	{"USER MAPPING FOR", NULL, NULL},
+	{"VERTEX", NULL, &Query_for_list_of_vertices},
 	{"VIEW", NULL, &Query_for_list_of_views},
 	{NULL, NULL, NULL, false}	/* end of list */
 };
@@ -2531,7 +2579,7 @@ _complete_from_query(int is_schema_query, const char *text, int state)
 			if (strcmp(completion_squery->catname,
 					   "pg_catalog.pg_class c") == 0 &&
 				strncmp(text, "pg_", 3) != 0 &&
-				strncmp(text, "gp_", 3) != 0)
+				strncmp(text, "gp_", 3) != 0 && strncmp(text, "skylon_", 7) != 0)
 			{
 				appendPQExpBuffer(&query_buffer,
 								  " AND c.relnamespace <> (SELECT oid FROM"
@@ -2554,6 +2602,43 @@ _complete_from_query(int is_schema_query, const char *text, int state)
 							  " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(nspname))+1)) > 1",
 							  string_length, e_text);
 
+      /*
+       * Add in matching graph names, but only if there is more than
+       * one potential match among graph names.
+       */
+
+      appendPQExpBuffer(&query_buffer, "\nUNION\n"
+               "SELECT pg_catalog.quote_ident(n.graphname) || '.' "
+                "FROM pg_catalog.skylon_graph n "
+                "WHERE substring(pg_catalog.quote_ident(n.graphname) || '.',1,%d)='%s'",
+                string_length, e_text);
+      appendPQExpBuffer(&query_buffer,
+                " AND (SELECT pg_catalog.count(*)"
+                " FROM pg_catalog.skylon_graph"
+      " WHERE substring(pg_catalog.quote_ident(graphname) || '.',1,%d) ="
+                " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(graphname))+1)) > 1",
+                string_length, e_text);
+
+      /*
+       * Add in matching qualified names, but only if there is exactly
+       * one graph matching the input-so-far.
+       */
+      appendPQExpBuffer(&query_buffer, "\nUNION\n"
+           "SELECT pg_catalog.quote_ident(n.graphname) || '.' || v.vlabelname "
+                "FROM pg_catalog.skylon_graph_vlabel v, pg_catalog.skylon_graph n "
+                "WHERE v.graphname = n.graphname AND v.schemaname = n.schemaname ",
+                qualresult,
+                completion_squery->catname,
+                completion_squery->namespace);
+
+      appendPQExpBuffer(&query_buffer, "\nUNION\n"
+           "SELECT pg_catalog.quote_ident(n.graphname) || '.' || e.elabelname "
+                "FROM pg_catalog.skylon_graph_elabel e, pg_catalog.skylon_graph n "
+                "WHERE e.graphname = n.graphname AND e.schemaname = n.schemaname ",
+                qualresult,
+                completion_squery->catname,
+                completion_squery->namespace);
+
 			/*
 			 * Add in matching qualified names, but only if there is exactly
 			 * one schema matching the input-so-far.
diff --git a/src/include/access/aomd.h b/src/include/access/aomd.h
index 42d8174..8eb4e0e 100644
--- a/src/include/access/aomd.h
+++ b/src/include/access/aomd.h
@@ -44,6 +44,16 @@ FormatAOSegmentFileName(
 							char *filepathname);
 
 extern void
+FormatAOSegmentIndexFileName(
+              char *basepath,
+              int segno,
+              int idxId,
+              int col,
+              int numCols,
+              int32 *fileSegNo,
+              char *filepathname);
+
+extern void
 MakeAOSegmentFileName(
 							Relation rel, 
 							int segno, 
diff --git a/src/include/access/orcam.h b/src/include/access/orcam.h
index f181a9c..47d24c2 100644
--- a/src/include/access/orcam.h
+++ b/src/include/access/orcam.h
@@ -79,17 +79,20 @@ extern Oid orcInsertValues(OrcInsertDescData *insertDesc, Datum *values,
 extern void orcEndInsert(OrcInsertDescData *insertDesc);
 
 // scan
-extern void orcBeginScan(struct ScanState *scanState);
-extern TupleTableSlot *orcScanNext(struct ScanState *scanState);
-extern void orcEndScan(struct ScanState *scanState);
-extern void orcReScan(struct ScanState *scanState);
-
-extern OrcScanDescData *orcBeginRead(Relation rel, Snapshot snapshot,
-                                     TupleDesc desc, List *fileSplits,
-                                     bool *colToReads, void *pushDown);
-extern void orcReadNext(OrcScanDescData *scanData, TupleTableSlot *slot);
-extern void orcEndRead(OrcScanDescData *scanData);
-extern void orcResetRead(OrcScanDescData *scanData);
+extern void orcBeginScan(struct ScanState* scanState);
+extern TupleTableSlot* orcScanNext(struct ScanState* scanState);
+extern void orcEndScan(struct ScanState* scanState);
+extern void orcReScan(struct ScanState* scanState);
+
+extern OrcScanDescData* orcBeginReadWithOptionsStr(
+    Relation rel, Snapshot snapshot, TupleDesc desc, List* fileSplits,
+    bool* colToReads, void* pushDown, const char*);
+extern OrcScanDescData* orcBeginRead(Relation rel, Snapshot snapshot,
+                                     TupleDesc desc, List* fileSplits,
+                                     bool* colToReads, void* pushDown);
+extern void orcReadNext(OrcScanDescData* scanData, TupleTableSlot* slot);
+extern void orcEndRead(OrcScanDescData* scanData);
+extern void orcResetRead(OrcScanDescData* scanData);
 
 // delete
 extern OrcDeleteDescData *orcBeginDelete(Relation rel, List *fileSplits,
@@ -110,8 +113,41 @@ extern uint64 orcEndUpdate(OrcUpdateDescData *updateDesc);
 // utils
 extern bool isDirectDispatch(Plan *plan);
 
+void checkOrcError(OrcFormatData* orcFormatData);
+
 // index
-extern int64_t* orcCreateIndex(Relation rel, int idxId, List* segno, int64* eof,
+extern int64_t* orcCreateIndex(Relation rel, Oid idxId, List* segno, int64* eof,
                                List* columnsToRead, int sortIdx);
+extern void orcBeginIndexOnlyScan(struct ScanState* scanState, Oid idxId,
+                                  List* columnsInIndex);
+extern void orcIndexReadNext(OrcScanDescData* scanData, TupleTableSlot* slot,
+                             List* columnsInIndex);
+
+extern TupleTableSlot* orcIndexOnlyScanNext(struct ScanState* scanState);
+
+extern void orcEndIndexOnlyScan(struct ScanState* scanState);
+
+extern void orcIndexOnlyReScan(struct ScanState* scanState);
+
+extern OrcScanDescData* orcBeginIndexOnlyRead(Relation rel, Oid idxId,
+                                              List* columnsInIndex,
+                                              Snapshot snapshot, TupleDesc desc,
+                                              List* fileSplits,
+                                              bool* colToReads, void* pushDown);
 
 #endif /* ORCAM_H_ */
diff --git a/src/include/access/orcsegfiles.h b/src/include/access/orcsegfiles.h
index a23521b..2d6b5e4 100644
--- a/src/include/access/orcsegfiles.h
+++ b/src/include/access/orcsegfiles.h
@@ -46,7 +46,8 @@ extern void updateOrcFileSegInfo(Relation rel, AppendOnlyEntry *aoEntry,
 
 extern void insertInitialOrcIndexEntry(AppendOnlyEntry *aoEntry, int idxOid, int segNo);
 extern void updateOrcIndexFileInfo(AppendOnlyEntry *aoEntry, int idxOid, int segNo, int64 eof);
-extern void deleteOrcIndexFileInfo(AppendOnlyEntry *aoEntry, int idxOid);
+extern void deleteOrcIndexFileInfo(Relation rel, AppendOnlyEntry *aoEntry, int idxOid);
+extern void deleteOrcIndexHdfsFiles(Relation rel, int32 segmentFileNum, int32 idx);
 
 extern List *orcGetAllSegFileSplits(AppendOnlyEntry *aoEntry,
                                     Snapshot snapshot);
diff --git a/src/include/access/plugstorage.h b/src/include/access/plugstorage.h
index 7b20af8..9505fe7 100644
--- a/src/include/access/plugstorage.h
+++ b/src/include/access/plugstorage.h
@@ -130,7 +130,6 @@ typedef struct PlugStorageData
 	ScanState              *ps_scan_state;
 	ScanDirection           ps_scan_direction;
 	FileScanDesc            ps_file_scan_desc;
-	ExternalScanState      *ps_ext_scan_state;
 	ResultRelSegFileInfo   *ps_result_seg_file_info;
 	ExternalInsertDesc      ps_ext_insert_desc;
 	ExternalInsertDesc      ps_ext_delete_desc;
@@ -163,8 +162,11 @@ typedef struct PlugStorageData
 	char                   *ps_hive_url;
 	/* Add for magma index info */
 	MagmaIndex              magma_idx;
-	/* for beginTransaction */
+	/* For beginTransaction */
 	List*                   magma_talbe_full_names;
+	/* The following two fields are for parameterized index scan */
+	IndexRuntimeKeyInfo*    runtime_key_info;
+	int                     num_run_time_keys;
 } PlugStorageData;
 
 typedef PlugStorageData *PlugStorage;
@@ -238,7 +240,12 @@ bool InvokePlugStorageFormatGetNext(FmgrInfo *func,
                                     TupleTableSlot *tupTableSlot);
 
 void InvokePlugStorageFormatReScan(FmgrInfo *func,
-                                   FileScanDesc fileScanDesc);
+                                   FileScanDesc fileScanDesc,
+                                   ScanState* scanState,
+                                   MagmaSnapshot* snapshot,
+                                   IndexRuntimeKeyInfo* runtimeKeyInfo,
+                                   int numRuntimeKeys,
+                                   TupleTableSlot *tupTableSlot);
 
 void InvokePlugStorageFormatEndScan(FmgrInfo *func,
                                     FileScanDesc fileScanDesc);
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 4becb73..0c573e3 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -137,11 +137,11 @@ typedef struct FileScanDescData
 	int    fs_serializeSchemaLen;
 
 	/* current scan information for pluggable format */
-	 PlugStorageScanFuncs fs_ps_scan_funcs;   /* scan functions */
-	 void *fs_ps_user_data;                   /* user data */
-	 struct ScanState *fs_ps_scan_state;      /* support rescan */
-	 Plan *fs_ps_plan;                        /* support rescan */
-
+	PlugStorageScanFuncs  fs_ps_scan_funcs;
+	void                 *fs_ps_user_data;
+	Plan                 *fs_ps_plan;
+	List                 *fs_ps_magma_splits;
+	bool                  fs_ps_magma_skip_tid;
 }	FileScanDescData;
 
 typedef FileScanDescData *FileScanDesc;
diff --git a/src/include/access/skey.h b/src/include/access/skey.h
index d295d3d..1882cac 100644
--- a/src/include/access/skey.h
+++ b/src/include/access/skey.h
@@ -62,15 +62,19 @@ typedef uint16 StrategyNumber;
  * for the invocation of an access method support procedure.  In this case
  * sk_strategy/sk_subtype are not meaningful, and sk_func may refer to a
  * function that returns something other than boolean.
+ *
+ * FIXME(hwy): since sk_attno may change, sk_attnoold was added to preserve the original value. Do we really need sk_attnoold?
  */
 typedef struct ScanKeyData
 {
-	int			sk_flags;		/* flags, see below */
-	AttrNumber	sk_attno;		/* table or index column number */
-	StrategyNumber sk_strategy; /* operator strategy number */
-	Oid			sk_subtype;		/* strategy subtype */
-	FmgrInfo	sk_func;		/* lookup info for function to call */
+	int	        sk_flags;	/* flags, see below */
+	AttrNumber	sk_attno;	/* table or index column number */
+	StrategyNumber  sk_strategy;    /* operator strategy number */
+	Oid		sk_subtype;     /* strategy subtype */
+	FmgrInfo	sk_func;	/* lookup info for function to call */
 	Datum		sk_argument;	/* data to compare */
+	AttrNumber      sk_attnoold;	/* original table or index column number */
+	FmgrInfo        sk_out_func;    /* for evaluating the runtime key in magma parameterized index scan */
 } ScanKeyData;
 
 typedef ScanKeyData *ScanKey;
@@ -133,8 +137,10 @@ extern void ScanKeyEntryInitialize(ScanKey entry,
 					   AttrNumber attributeNumber,
 					   StrategyNumber strategy,
 					   Oid subtype,
-					   RegProcedure procedure,
-					   Datum argument);
+					   RegProcedure opProcedure,
+					   Datum argument,
+					   AttrNumber attributeNumberOld,
+					   RegProcedure outputProcedure);
 extern void ScanKeyEntryInitializeWithInfo(ScanKey entry,
 							   int flags,
 							   AttrNumber attributeNumber,
diff --git a/src/include/catalog/calico.pl b/src/include/catalog/calico.pl
index de2e78d..82e6e17 100755
--- a/src/include/catalog/calico.pl
+++ b/src/include/catalog/calico.pl
@@ -3515,6 +3515,14 @@ sub more_header
 #include "catalog/pg_tidycat.h"
 
 #include "catalog/gp_configuration.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
 #include "catalog/gp_segment_config.h"
 #include "catalog/gp_san_config.h"
 
diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h
index abd79df..b17b0cb 100644
--- a/src/include/catalog/dependency.h
+++ b/src/include/catalog/dependency.h
@@ -144,6 +144,9 @@ typedef enum ObjectClass
 	OCLASS_USER_MAPPING,		/* pg_user_mapping */
 	OCLASS_EXTPROTOCOL,			/* pg_extprotocol */
 	OCLASS_COMPRESSION,			/* pg_compression */
+	OCLASS_GRAPH,     /* skylon_graph */
+	OCLASS_VLABEL,     /* skylon_vlabel */
+	OCLASS_ELABEL,     /* skylon_elabel */
 	MAX_OCLASS					/* MUST BE LAST */
 } ObjectClass;
 
diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h
index f0d33e7..b36e07c 100644
--- a/src/include/catalog/indexing.h
+++ b/src/include/catalog/indexing.h
@@ -391,6 +391,27 @@ DECLARE_UNIQUE_INDEX(pg_resqueue_oid_index, 6027, on pg_resqueue using btree(oid
 DECLARE_UNIQUE_INDEX(pg_resqueue_rsqname_index, 6028, on pg_resqueue using btree(rsqname name_ops));
 #define ResQueueRsqnameIndexId	6028
 
+//DECLARE_UNIQUE_INDEX(pg_vlabel_name_index, 4860, on pg_vlabel using btree(vlabelname name_ops));
+//#define VlabelNameIndexId  4860
+//
+//DECLARE_UNIQUE_INDEX(pg_elabel_name_index, 4861, on pg_elabel using btree(elabelname name_ops));
+//#define ElabelNameIndexId  4861
+//
+//DECLARE_UNIQUE_INDEX(pg_vlabel_attribute_vname_attnam_index, 4862, on pg_vlabel_attribute using btree(vlabelname name_ops,attrname name_ops));
+//#define VlabelAttributeVnameAttnamIndexId  4862
+//
+//DECLARE_UNIQUE_INDEX(pg_elabel_attribute_ename_attnam_index, 4863, on pg_elabel_attribute using btree(elabelname name_ops,attrname name_ops));
+//#define ElabelAttributeEnameAttnamIndexId  4863
+//
+//DECLARE_UNIQUE_INDEX(pg_graph_vlabel_gnam_vnam_index, 4864, on pg_graph_vlabel using btree(graphname name_ops,vlabelname name_ops));
+//#define GraphVlabelGnameVnamIndexId  4864
+//
+//DECLARE_UNIQUE_INDEX(pg_graph_elabel_gnam_enam_index, 4865, on pg_graph_elabel using btree(graphname name_ops,elabelname name_ops));
+//#define GraphElabelGnameEnamIndexId  4865
+//
+//DECLARE_UNIQUE_INDEX(pg_graph_gnam_snam_index, 4866, on pg_graph using btree(graphname name_ops,schemaname name_ops));
+//#define GraphGnameSnamIndexId  4866
+
 /* TIDYCAT_END_CODEGEN */
 
 /* last step of initialization script: build the indexes declared above */
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index 8dbf305..e2a4937 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -33,7 +33,8 @@ typedef struct _FuncCandidateList
 	Oid			args[1];		/* arg types --- VARIABLE LENGTH ARRAY */
 }	*FuncCandidateList;	/* VARIABLE LENGTH STRUCT */
 
-
+extern char *RelidGetName(Oid relid);
+extern RangeVar *RelidGetRangeVar(Oid relid);
 extern Oid GetCatalogId(const char *catalogname);
 extern Oid	RangeVarGetRelid(const RangeVar *relation, bool failOK, bool allowHcatalog);
 extern Oid	RangeVarGetCreationNamespace(const RangeVar *newRelation);
diff --git a/src/include/catalog/pg_tidycat.h b/src/include/catalog/pg_tidycat.h
index 49c7ff3..41d0aa3 100644
--- a/src/include/catalog/pg_tidycat.h
+++ b/src/include/catalog/pg_tidycat.h
@@ -26,6 +26,14 @@
  */
 
 
+#include "catalog/skylon_elabel.h"
+#include "catalog/skylon_elabel_attribute.h"
+#include "catalog/skylon_graph.h"
+#include "catalog/skylon_index.h"
+#include "catalog/skylon_graph_elabel.h"
+#include "catalog/skylon_graph_vlabel.h"
+#include "catalog/skylon_vlabel.h"
+#include "catalog/skylon_vlabel_attribute.h"
 #include "catalog/gp_configuration.h"
 #include "catalog/gp_global_sequence.h"
 #include "catalog/gp_master_mirroring.h"
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index b4b1029..26a2837 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -734,6 +734,29 @@ DATA(insert OID = 6995 ( gp_global_sequence	    PGNSP PGUID -1 f c t \054 5096 0
 /* relation id: 5006 - gp_configuration_history 20101104 */
 DATA(insert OID = 6434 ( gp_configuration_history	    PGNSP PGUID -1 f c t \054 5006 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));	
 #define GP_CONFIGURATION_HISTORY_RELTYPE_OID 6434
+/* relation id: 4850 - skylon_vlabel 20200224 */
+DATA(insert OID = 4870 ( skylon_vlabel     PGNSP PGUID -1 f c t \054 4850 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_VLABEL_RELTYPE_OID 4870
+/* relation id: 4851 - skylon_elabel 20200224 */
+DATA(insert OID = 4871 ( skylon_elabel     PGNSP PGUID -1 f c t \054 4851 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_ELABEL_RELTYPE_OID 4871
+/* relation id: 4852 - skylon_vlabel_attribute 20200224 */
+DATA(insert OID = 4872 ( skylon_vlabel_attribute     PGNSP PGUID -1 f c t \054 4852 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_VLABEL_ATTRIBUTE_RELTYPE_OID 4872
+/* relation id: 4853 - skylon_elabel_attribute 20200224 */
+DATA(insert OID = 4873 ( skylon_elabel_attribute     PGNSP PGUID -1 f c t \054 4853 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_ELABEL_ATTRIBUTE_RELTYPE_OID 4873
+/* relation id: 4854 - skylon_graph_vlabel 20200224 */
+DATA(insert OID = 4874 ( skylon_graph_vlabel     PGNSP PGUID -1 f c t \054 4854 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_GRAPH_VLABEL_RELTYPE_OID 4874
+/* relation id: 4855 - skylon_graph_elabel 20200224 */
+DATA(insert OID = 4875 ( skylon_graph_elabel     PGNSP PGUID -1 f c t \054 4855 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_GRAPH_ELABEL_RELTYPE_OID 4875
+/* relation id: 4856 - skylon_graph 20200224 */
+DATA(insert OID = 4876 ( skylon_graph     PGNSP PGUID -1 f c t \054 4856 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_GRAPH_RELTYPE_OID 4876
+DATA(insert OID = 4877 ( skylon_index     PGNSP PGUID -1 f c t \054 4857 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+#define SKYLON_INDEX_RELTYPE_OID 4877
 /* relation id: 5029 - gp_db_interfaces 20101104 */
 DATA(insert OID = 6436 ( gp_db_interfaces	    PGNSP PGUID -1 f c t \054 5029 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));	
 #define GP_DB_INTERFACES_RELTYPE_OID 6436
diff --git a/src/include/catalog/skylon_elabel.h b/src/include/catalog/skylon_elabel.h
new file mode 100644
index 0000000..d17f601
--- /dev/null
+++ b/src/include/catalog/skylon_elabel.h
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*-------------------------------------------------------------------------
+ *
+ * skylon_elabel.h
+ *    catalog of graph edge labels
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef SRC_INCLUDE_CATALOG_SKYLON_ELABEL_H_
+#define SRC_INCLUDE_CATALOG_SKYLON_ELABEL_H_
+
+#include "catalog/genbki.h"
+#include "c.h"
+
+/* TIDYCAT_BEGINDEF
+
+   CREATE TABLE skylon_elabel
+   with (camelcase=Elabel, shared=true, oid=false, relid=4851, reltype_oid=4871, content=MASTER_ONLY)
+   (
+   elabelname              name,
+   schemaname              name,
+   fromvlabel              name,
+   tovlabel                name
+   );
+
+   TIDYCAT_ENDDEF
+*/
+
+/* TIDYCAT_BEGIN_CODEGEN
+
+   WARNING: DO NOT MODIFY THE FOLLOWING SECTION:
+   Generated by tidycat.pl version 34
+   on Fri Feb 26 10:43:15 2016
+*/
+
+
+/*
+ TidyCat Comments for gp_configuration_history:
+  Table is shared, so catalog.c:IsSharedRelation is updated.
+  Table does not have an Oid column.
+  Table has static type (see pg_types.h).
+  Table has TOASTable columns, but NO TOAST table.
+  Table contents are only maintained on MASTER.
+  Table has weird hack for timestamp column.
+
+*/
+
+/*
+ * The CATALOG definition has to refer to the type of "time" as
+ * "timestamptz" (lower case) so that bootstrap mode recognizes it.  But
+ * the C header files define this type as TimestampTz.  Since the field is
+ * potentially-null and therefore cannot be accessed directly from C code,
+ * there is no particular need for the C struct definition to show the
+ * field type as TimestampTz --- instead we just make it Datum.
+ */
+
+
+/* ----------------
+ *    skylon_elabel definition.  cpp turns this into
+ *    typedef struct FormData_skylon_elabel
+ * ----------------
+ */
+
+#define ElabelRelationId 4851
+
+CATALOG(skylon_elabel,4851) BKI_WITHOUT_OIDS
+{
+  NameData elabelname;
+  NameData schemaname;
+  NameData fromvlabel;
+  NameData tovlabel;
+} FormData_skylon_elabel;
+
+
+/* ----------------
+ *    Form_skylon_elabel corresponds to a pointer to a tuple with
+ *    the format of skylon_elabel relation.
+ * ----------------
+ */
+
+typedef FormData_skylon_elabel *Form_skylon_elabel;
+
+/* ----------------
+ *    compiler constants for pg_database
+ * ----------------
+ */
+#define Natts_skylon_elabel 4
+#define Anum_skylon_elabel_elabelname 1
+#define Anum_skylon_elabel_schemaname 2
+#define Anum_skylon_elabel_fromvlabel 3
+#define Anum_skylon_elabel_tovlabel 4
+/* TIDYCAT_END_CODEGEN */
+
+extern void InsertElabelEntry(const char* elabelname, const char* schemaname,
+                       const char* fromvlabel, const char* tovlabel);
+
+#endif /* SRC_INCLUDE_CATALOG_SKYLON_ELABEL_H_ */
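The CATALOG block above gives skylon_elabel four name columns: elabelname, schemaname, fromvlabel and tovlabel. As a minimal sketch (assuming the catalog can be queried like the other skylon_* catalogs used by the psql tab-completion queries elsewhere in this patch), the defined edge labels and their endpoints could be listed with:

    -- list every edge label together with the vertex labels it connects
    SELECT elabelname, schemaname, fromvlabel, tovlabel
      FROM skylon_elabel
     ORDER BY schemaname, elabelname;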
diff --git a/src/include/catalog/skylon_elabel_attribute.h b/src/include/catalog/skylon_elabel_attribute.h
new file mode 100644
index 0000000..2b28e6f
--- /dev/null
+++ b/src/include/catalog/skylon_elabel_attribute.h
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*-------------------------------------------------------------------------
+ *
+ * skylon_elabel_attribute.h
+ *    catalog of graph edge label attributes
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef SRC_INCLUDE_CATALOG_SKYLON_ELABEL_ATTRIBUTE_H_
+#define SRC_INCLUDE_CATALOG_SKYLON_ELABEL_ATTRIBUTE_H_
+
+#include "catalog/genbki.h"
+#include "c.h"
+
+/* TIDYCAT_BEGINDEF
+
+   CREATE TABLE skylon_elabel_attribute
+   with (camelcase=ElabelAttr, shared=true, oid=false, relid=4853, reltype_oid=4873, content=MASTER_ONLY)
+   (
+   schemaname            name,
+   elabelname            name,
+   attrname              name,
+   attrtypid             oid,
+   primaryrank           int4,
+   rank                  int4
+   );
+
+   TIDYCAT_ENDDEF
+*/
+
+/* TIDYCAT_BEGIN_CODEGEN
+
+   WARNING: DO NOT MODIFY THE FOLLOWING SECTION:
+   Generated by tidycat.pl version 34
+   on Fri Feb 26 10:43:15 2016
+*/
+
+
+/*
+ TidyCat Comments for gp_configuration_history:
+  Table is shared, so catalog.c:IsSharedRelation is updated.
+  Table does not have an Oid column.
+  Table has static type (see pg_types.h).
+  Table has TOASTable columns, but NO TOAST table.
+  Table contents are only maintained on MASTER.
+  Table has weird hack for timestamp column.
+
+*/
+
+/*
+ * The CATALOG definition has to refer to the type of "time" as
+ * "timestamptz" (lower case) so that bootstrap mode recognizes it.  But
+ * the C header files define this type as TimestampTz.  Since the field is
+ * potentially-null and therefore cannot be accessed directly from C code,
+ * there is no particular need for the C struct definition to show the
+ * field type as TimestampTz --- instead we just make it Datum.
+ */
+
+
+/* ----------------
+ *    skylon_elabel_attribute definition.  cpp turns this into
+ *    typedef struct FormData_skylon_elabel_attribute
+ * ----------------
+ */
+
+#define ElabelAttrRelationId 4853
+
+CATALOG(skylon_elabel_attribute,4853) BKI_WITHOUT_OIDS
+{
+  NameData schemaname;
+  NameData elabelname;
+  NameData attrname;
+  Oid attrtypid;
+  int4 primaryrank;
+  int4 rank;
+} FormData_skylon_elabel_attribute;
+
+
+/* ----------------
+ *    Form_skylon_elabel_attribute corresponds to a pointer to a tuple with
+ *    the format of the skylon_elabel_attribute relation.
+ * ----------------
+ */
+
+typedef FormData_skylon_elabel_attribute *Form_skylon_elabel_attribute;
+
+/* ----------------
+ *    compiler constants for pg_database
+ * ----------------
+ */
+#define Natts_skylon_elabel_attribute 6
+#define Anum_skylon_elabel_attribute_schemaname 1
+#define Anum_skylon_elabel_attribute_elabelname 2
+#define Anum_skylon_elabel_attribute_attrname 3
+#define Anum_skylon_elabel_attribute_attrtypid 4
+#define Anum_skylon_elabel_attribute_primaryrank 5
+#define Anum_skylon_elabel_attribute_rank 6
+/* TIDYCAT_END_CODEGEN */
+
+extern void InsertElabelAttrEntry(const char* schemaname, const char* elabelname, const char* attrname,
+                           Oid attrtypid, int4 primaryrank, int4 rank);
+
+#endif /* SRC_INCLUDE_CATALOG_SKYLON_ELABEL_ATTRIBUTE_H_ */
diff --git a/src/include/catalog/skylon_graph.h b/src/include/catalog/skylon_graph.h
new file mode 100644
index 0000000..6d1097c
--- /dev/null
+++ b/src/include/catalog/skylon_graph.h
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*-------------------------------------------------------------------------
+ *
+ * skylon_graph.h
+ *    catalog of graphs
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef SRC_INCLUDE_CATALOG_SKYLON_GRAPH_H_
+#define SRC_INCLUDE_CATALOG_SKYLON_GRAPH_H_
+
+#include "catalog/genbki.h"
+#include "c.h"
+
+/* TIDYCAT_BEGINDEF
+
+   CREATE TABLE skylon_graph
+   with (camelcase=Graph, shared=true, oid=false, relid=4856, reltype_oid=4876, content=MASTER_ONLY)
+   (
+   graphName                name,
+   schemaName              name
+   );
+
+   TIDYCAT_ENDDEF
+*/
+
+/* TIDYCAT_BEGIN_CODEGEN
+
+   WARNING: DO NOT MODIFY THE FOLLOWING SECTION:
+   Generated by tidycat.pl version 34
+   on Fri Feb 26 10:43:15 2016
+*/
+
+
+/*
+ TidyCat Comments for gp_configuration_history:
+  Table is shared, so catalog.c:IsSharedRelation is updated.
+  Table does not have an Oid column.
+  Table has static type (see pg_types.h).
+  Table has TOASTable columns, but NO TOAST table.
+  Table contents are only maintained on MASTER.
+  Table has weird hack for timestamp column.
+
+*/
+
+/*
+ * The CATALOG definition has to refer to the type of "time" as
+ * "timestamptz" (lower case) so that bootstrap mode recognizes it.  But
+ * the C header files define this type as TimestampTz.  Since the field is
+ * potentially-null and therefore cannot be accessed directly from C code,
+ * there is no particular need for the C struct definition to show the
+ * field type as TimestampTz --- instead we just make it Datum.
+ */
+
+
+/* ----------------
+ *    skylon_graph definition.  cpp turns this into
+ *    typedef struct FormData_skylon_graph
+ * ----------------
+ */
+
+#define GraphRelationId 4856
+
+CATALOG(skylon_graph,4856) BKI_WITHOUT_OIDS
+{
+  NameData graphname;
+  NameData schemaname;
+} FormData_skylon_graph;
+
+
+/* ----------------
+ *    Form_skylon_graph corresponds to a pointer to a tuple with
+ *    the format of skylon_graph relation.
+ * ----------------
+ */
+
+typedef FormData_skylon_graph *Form_skylon_graph;
+
+/* ----------------
+ *    compiler constants for pg_database
+ * ----------------
+ */
+#define Natts_skylon_graph 2
+#define Anum_skylon_graph_graphname 1
+#define Anum_skylon_graph_schemaname 2
+/* TIDYCAT_END_CODEGEN */
+extern void InsertGraphEntry(const char* graphname, const char* schemaname);
+
+#endif /* SRC_INCLUDE_CATALOG_SKYLON_GRAPH_H_ */
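skylon_graph itself records only a (graphname, schemaname) pair; the labels belonging to a graph live in skylon_graph_vlabel and skylon_graph_elabel. A minimal sketch of listing the vertex labels per graph, reusing the join condition (graphname plus schemaname) that the psql completion queries in this patch use:

    -- list the vertex labels attached to each graph
    SELECT n.graphname, v.vlabelname
      FROM skylon_graph n
      JOIN skylon_graph_vlabel v
        ON v.graphname = n.graphname AND v.schemaname = n.schemaname
     ORDER BY n.graphname, v.vlabelname;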
diff --git a/src/include/catalog/skylon_graph_elabel.h b/src/include/catalog/skylon_graph_elabel.h
new file mode 100644
index 0000000..ab24904
--- /dev/null
+++ b/src/include/catalog/skylon_graph_elabel.h
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
... 23801 lines suppressed ...

[hawq] 02/02: HAWQ-1834. add options for native orc table creation

Posted by zt...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ztao1987 pushed a commit to branch ztao
in repository https://gitbox.apache.org/repos/asf/hawq.git

commit c79ab3228fddc1620b6790b0fee8ffe629b77017
Author: ztao1987 <zh...@gmail.com>
AuthorDate: Wed Mar 16 14:36:21 2022 +0800

    HAWQ-1834. add options for native orc table creation
---
 src/backend/access/common/reloptions.c | 278 +++++++++++++++++++++------------
 src/backend/access/orc/orcam.c         |  25 +--
 src/backend/utils/cache/relcache.c     |  37 +++++
 src/include/access/orcam.h             |   7 +
 src/include/utils/rel.h                |   5 +-
 5 files changed, 228 insertions(+), 124 deletions(-)
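The reloptions changes below route every ORC-specific WITH option through a new checkOrcOptions() pass. As an illustrative sketch only (the table and column names are invented, while the option names and the accepted ranges come from the new orc_keywords array and its error messages), a native ORC table using these options might be created as follows:

    CREATE TABLE orders_orc (id bigint, note text)
    WITH (
        appendonly = true,
        orientation = orc,            -- selects the native ORC storage format
        compresstype = zstd,          -- snappy | lz4 | zlib | zstd | none
        dicthreshold = 0.8,           -- must lie within [0, 1]
        compressblocksize = 262144,   -- bytes, between 1B and 1GB
        rowindexstride = 10000,       -- between 1000 and 1024*1024*1024
        stripesize = 64,              -- MBytes, between 1MB and 1GB
        bucketnum = 6                 -- must be greater than 0
    );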

diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 6211fdd..5307f90 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -300,6 +300,175 @@ parseRelOptions(Datum options, int numkeywords, const char *const * keywords,
 	}
 }
 
+/*
+ * Parse reloptions for native orc format
+ */
+void
+checkOrcOptions(Datum reloptions, bool validate, StdRdOptions *result)
+{
+  /*
+   * 1. There is no need to check the 'appendonly' and 'orientation' options here
+   *    because they are already checked in default_reloptions.
+   * 2. 'compresslevel' is a default option in reloptions, but it is not actually
+   *    used by the native orc format.
+   * 3. Every time an option is added to orc_keywords, a matching entry must also
+   *    be added to default_keywords, since the first round of checking happens there.
+   */
+  const char *const orc_keywords[] = {
+    "appendonly",
+    "orientation",
+    "compresstype",
+    "compresslevel",
+    "dicthreshold",
+    "compressblocksize",
+    "rowindexstride",
+    "stripesize",
+    "bloomfilter",
+    "bucketnum",
+  };
+
+  bool    appendonly = true;
+  char    columnstore = RELSTORAGE_ORC;
+  char*   compresstype = NULL;
+  int32   compressblocksize = DEFAULT_ORC_COMPRESS_BLOCK_SIZE;
+  int32   rowindexstride = DEFAULT_ORC_ROW_GROUP_SIZE;
+  int32   stripesize = DEFAULT_ORC_STRIPE_SIZE;
+  char*   bloomfilter = NULL;
+  int32   bucket_num = 0;
+  int     j = 0;
+
+  char     *orcOptionValues[ARRAY_SIZE(orc_keywords)];
+  parseRelOptions(reloptions, ARRAY_SIZE(orc_keywords), orc_keywords, orcOptionValues, validate);
+
+  /* orc compresstype */
+  if (orcOptionValues[2] != NULL)
+  {
+    compresstype = orcOptionValues[2];
+
+    if ((strcmp(compresstype, "snappy") != 0) && (strcmp(compresstype, "lz4") != 0)
+            // XXX(changyong): The default zlib compression level of ORC table is Z_DEFAULT_COMPRESSION,
+            // and this is different from hive of which default compression level is (Z_BEST_SPEED + 1).
+            && (strcmp(compresstype, "zlib") != 0)
+            && (strcmp(compresstype, "zstd") != 0)
+            && (strcmp(compresstype, "none") != 0))
+    {
+      ereport(ERROR,
+            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+             errmsg("orc table doesn't support compress type: \'%s\'", compresstype),
+             errOmitLocation(true)));
+    }
+
+    if (compresstype) {
+      StringInfoData option;
+      initStringInfo(&option);
+      appendStringInfo(&option, "\"compresstype\":\"%s\"",
+                             compresstype);
+      compresstype = pstrdup(option.data);
+    }
+  }
+
+  /* orc dicthreshold */
+  if (orcOptionValues[4] != NULL)
+  {
+    char *end;
+    double threshold = strtod(orcOptionValues[4], &end);
+    if (end == orcOptionValues[4] || *end != '\0' || threshold < 0 || threshold > 1)
+      ereport(ERROR,
+        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+         errmsg("\'dicthreshold\' must be within [0-1]"),
+             errOmitLocation(true)));
+    StringInfoData option;
+    initStringInfo(&option);
+    if (compresstype != NULL)
+      appendStringInfo(&option, "%s,",compresstype);
+    appendStringInfo(&option, "\"dicthreshold\": \"%s\"",
+                     orcOptionValues[4]);
+    compresstype = pstrdup(option.data);
+  }
+
+  /* orc compressblocksize */
+  if (orcOptionValues[5] != NULL)
+  {
+    compressblocksize = pg_atoi(orcOptionValues[5], sizeof(int32), 0);
+    if ((compressblocksize < MIN_ORC_COMPRESS_BLOCK_SIZE) || (compressblocksize > MAX_ORC_COMPRESS_BLOCK_SIZE))
+    {
+      if (validate)
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+             errmsg("compressblock size for orc table should be between 1B and 1GB and should be specified in Bytes. "
+                 "Got %d Bytes", compressblocksize), errOmitLocation(true)));
+
+      compressblocksize = DEFAULT_ORC_COMPRESS_BLOCK_SIZE;
+    }
+  }
+
+  /* orc rowgroupsize */
+  if (orcOptionValues[6] != NULL)
+  {
+    rowindexstride = pg_atoi(orcOptionValues[6], sizeof(int32), 0);
+
+    if ((rowindexstride < MIN_ORC_ROW_GROUP_SIZE) || (rowindexstride > MAX_ORC_ROW_GROUP_SIZE))
+    {
+      if (validate)
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+             errmsg("row group size for orc table should be between 1000 and 1024*1024*1024. "
+                 "Got %d", rowindexstride), errOmitLocation(true)));
+
+      rowindexstride = DEFAULT_ORC_ROW_GROUP_SIZE;
+    }
+  }
+
+  /* orc stripesize */
+  if (orcOptionValues[7] != NULL)
+  {
+    stripesize = pg_atoi(orcOptionValues[7], sizeof(int32), 0);
+
+    if ((stripesize < MIN_ORC_STRIPE_SIZE) || (stripesize > MAX_ORC_STRIPE_SIZE))
+    {
+      if (validate)
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+             errmsg("stripe size for orc table should be between 1MB and 1GB and should be specified in MBytes. "
+                 "Got %d MB", stripesize), errOmitLocation(true)));
+
+      stripesize = DEFAULT_ORC_STRIPE_SIZE;
+    }
+  }
+
+  /* orc bloomfilter */
+  if (orcOptionValues[8] != NULL)
+  {
+    StringInfoData option;
+    initStringInfo(&option);
+    appendStringInfoString(&option, orcOptionValues[8]);
+    bloomfilter = pstrdup(option.data);
+  }
+
+  /* orc bucket_num */
+  if (orcOptionValues[9] != NULL)
+  {
+    bucket_num= pg_atoi(orcOptionValues[9], sizeof(int32), 0);
+    if(bucket_num <= 0)
+    {
+      if (validate)
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+             errmsg("bucket number should be greater than 0. "
+                 "Got %d", bucket_num), errOmitLocation(true)));
+
+      bucket_num = 0;
+    }
+  }
+  result->compressblocksize = compressblocksize;
+  result->stripesize = stripesize;
+  result->rowindexstride = rowindexstride;
+  if (compresstype != NULL)
+    for (j = 0;j < strlen(compresstype); j++)
+      compresstype[j] = pg_tolower(compresstype[j]);
+  result->compresstype = compresstype;
+  result->bloomfilter = bloomfilter;
+}
 
 /*
  * Parse reloptions for anything using StdRdOptions
@@ -323,6 +492,8 @@ default_reloptions(Datum reloptions, bool validate, char relkind,
 		"dicthreshold",
 		"bloomfilter",
 		"stripesize",
+		"rowindexstride",
+		"compressblocksize",
 	};
 
 	char	   *values[ARRAY_SIZE(default_keywords)];
@@ -330,7 +501,6 @@ default_reloptions(Datum reloptions, bool validate, char relkind,
 	int32		blocksize = DEFAULT_APPENDONLY_BLOCK_SIZE;
 	int32		pagesize = DEFAULT_PARQUET_PAGE_SIZE;
 	int32		rowgroupsize = DEFAULT_PARQUET_ROWGROUP_SIZE;
-	int32   stripesize = DEFAULT_ORC_STRIPE_SIZE;
 	bool		appendonly = false;
 	bool		checksum = false;
 	char*		compresstype = NULL;
@@ -542,28 +712,6 @@ default_reloptions(Datum reloptions, bool validate, char relkind,
 						 errmsg("non-parquet table doesn't support compress type: \'%s\'", compresstype),
 						 errOmitLocation(true)));
 		}
-
-		if ((columnstore == RELSTORAGE_ORC) && (strcmp(compresstype, "snappy") != 0)
-		        && (strcmp(compresstype, "lz4") != 0)
-		        // XXX(changyong): The default zlib compression level of ORC table is Z_DEFAULT_COMPRESSION,
-		        // and this is different from hive of which default compression level is (Z_BEST_SPEED + 1).
-		        && (strcmp(compresstype, "zlib") != 0)
-		        && (strcmp(compresstype, "zstd") != 0)
-		        && (strcmp(compresstype, "none") != 0))
-    {
-      ereport(ERROR,
-            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-             errmsg("orc table doesn't support compress type: \'%s\'", compresstype),
-             errOmitLocation(true)));
-    }
-
-		if (!(columnstore == RELSTORAGE_ORC) && (strcmp(compresstype, "lz4") == 0))
-    {
-      ereport(ERROR,
-            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-             errmsg("non-orc table doesn't support compress type: \'%s\'", compresstype),
-             errOmitLocation(true)));
-    }
 	}
 
 	/* compression level */
@@ -644,14 +792,6 @@ default_reloptions(Datum reloptions, bool validate, char relkind,
 		compresslevel = setDefaultCompressionLevel(compresstype);
 	}
 
-	if (columnstore == RELSTORAGE_ORC && compresstype) {
-    StringInfoData option;
-    initStringInfo(&option);
-    appendStringInfo(&option, "\"compresstype\":\"%s\"",
-                           compresstype);
-    compresstype = pstrdup(option.data);
-  }
-
 	/* checksum */
 	if (values[7] != NULL)
 	{
@@ -811,75 +951,6 @@ default_reloptions(Datum reloptions, bool validate, char relkind,
 					 errOmitLocation(true)));
 	}
 
-  /* stripesize */
-  if (values[13] != NULL)
-  {
-    if(!(columnstore == RELSTORAGE_ORC)){
-      ereport(ERROR,
-          (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-           errmsg("invalid option \'stripesize\' for non-orc table"),
-           errOmitLocation(true)));
-    }
-
-    stripesize = pg_atoi(values[13], sizeof(int32), 0);
-
-    if ((stripesize < MIN_ORC_STRIPE_SIZE) || (stripesize > MAX_ORC_STRIPE_SIZE))
-    {
-      if (validate)
-        ereport(ERROR,
-            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-             errmsg("stripe size for orc table should between 1MB and 1GB and should be specified in MBytes. "
-                 "Got %d MB", stripesize), errOmitLocation(true)));
-
-      stripesize = DEFAULT_ORC_STRIPE_SIZE;
-    }
-  }
-
-	// dicthreshold
-	if (values[11] != NULL) {
-	  if(!(columnstore == RELSTORAGE_ORC)){
-      ereport(ERROR,
-          (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-           errmsg("invalid option \'dicthreshold\' for non-orc table"),
-           errOmitLocation(true)));
-    }
-	  char *end;
-	  double threshold = strtod(values[11], &end);
-	  if (end == values[11] || *end != '\0' || threshold < 0 || threshold > 1)
-	    ereport(ERROR,
-        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-         errmsg("\'dicthreshold\' must be within [0-1]"),
-             errOmitLocation(true)));
-	  StringInfoData option;
-	  initStringInfo(&option);
-	  if (compresstype != NULL)
-	    appendStringInfo(&option, "%s,",compresstype);
-	  appendStringInfo(&option, "\"dicthreshold\": \"%s\"",
-	                   values[11]);
-	  compresstype = pstrdup(option.data);
-	}
-
-	// bloomfilter
-	if (values[12] != NULL) {
-    if(!(columnstore == RELSTORAGE_ORC)){
-      ereport(ERROR,
-          (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-           errmsg("invalid option \'bloomfilter\' for non-orc table"),
-           errOmitLocation(true)));
-    }
-    ereport(ERROR,
-        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-         errmsg("option \'bloomfilter\' for orc table not supported yet"),
-         errOmitLocation(true)));
-    StringInfoData option;
-    initStringInfo(&option);
-    if (compresstype != NULL)
-      appendStringInfo(&option, "%s",compresstype);
-    appendStringInfo(&option, ",\"bloomfilter\": \"%s\"",
-                     values[12]);
-    compresstype = pstrdup(option.data);
-  }
-
 	result = (StdRdOptions *) palloc(sizeof(StdRdOptions));
 	SET_VARSIZE(result, sizeof(StdRdOptions));
 
@@ -888,7 +959,6 @@ default_reloptions(Datum reloptions, bool validate, char relkind,
 	result->blocksize = blocksize;
 	result->pagesize = pagesize;
 	result->rowgroupsize = rowgroupsize;
-	result->stripesize = stripesize;
 	result->compresslevel = compresslevel;
 	if (compresstype != NULL)
 		for (j = 0;j < strlen(compresstype); j++)
@@ -900,6 +970,12 @@ default_reloptions(Datum reloptions, bool validate, char relkind,
 	result->errorTable = errorTable;
 	result->bucket_num = bucket_num;
 
+	// extra parsing and validation for the ORC format
+	if (columnstore == RELSTORAGE_ORC)
+	{
+		checkOrcOptions(reloptions, validate, result);
+	}
+
 	return (bytea *) result;
 }
 
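
Taken together, the reloptions.c changes move every ORC-only option out of the shared branches of default_reloptions() and into checkOrcOptions(), which runs only for RELSTORAGE_ORC and fills the new StdRdOptions fields (compressblocksize, stripesize, rowindexstride, compresstype, bloomfilter). The routine's signature is not shown in this excerpt, but from the call site it is presumably:

    /* Inferred from the call above; this prototype is an assumption, not quoted code. */
    static void checkOrcOptions(Datum reloptions, bool validate, StdRdOptions *result);
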
diff --git a/src/backend/access/orc/orcam.c b/src/backend/access/orc/orcam.c
index f226844..f7260c3 100644
--- a/src/backend/access/orc/orcam.c
+++ b/src/backend/access/orc/orcam.c
@@ -279,7 +279,6 @@ static int32 GetSplitCount(List *fileSplits, Oid idxId) {
   return ret;
 }
 
->>>>>>> 7910d663d... step2
 OrcInsertDescData *orcBeginInsert(Relation rel,
                                   ResultRelSegFileInfo *segfileinfo) {
   OrcInsertDescData *insertDesc =
@@ -299,17 +298,7 @@ OrcInsertDescData *orcBeginInsert(Relation rel,
   AppendOnlyEntry *aoentry =
       GetAppendOnlyEntry(RelationGetRelid(rel), SnapshotNow);
   StringInfoData option;
-  initStringInfo(&option);
-  appendStringInfoChar(&option, '{');
-  appendStringInfo(&option, "\"logicEof\": %" PRId64, segfileinfo->eof[0]);
-  appendStringInfo(&option, ", \"uncompressedEof\": %" PRId64,
-                   segfileinfo->uncompressed_eof[0]);
-  appendStringInfo(
-      &option, ", \"stripeSize\": %" PRId64,
-      ((StdRdOptions *)(rel->rd_options))->stripesize * 1024 * 1024);
-  if (aoentry->compresstype)
-    appendStringInfo(&option, ", %s", aoentry->compresstype);
-  appendStringInfoChar(&option, '}');
+  constructOrcFormatOptionString(&option, rel, segfileinfo, aoentry);
 
   insertDesc->orcFormatData = palloc0(sizeof(OrcFormatData));
   insertDesc->orcFormatData->fmt =
@@ -929,11 +918,7 @@ OrcDeleteDescData *orcBeginDelete(Relation rel, List *fileSplits,
   AppendOnlyEntry *aoentry =
       GetAppendOnlyEntry(RelationGetRelid(rel), SnapshotNow);
   StringInfoData option;
-  initStringInfo(&option);
-  appendStringInfoChar(&option, '{');
-  if (aoentry->compresstype)
-    appendStringInfo(&option, "%s", aoentry->compresstype);
-  appendStringInfoChar(&option, '}');
+  constructOrcFormatOptionString(&option, rel, NULL, aoentry);
 
   int hdfsPathMaxLen = AOSegmentFilePathNameLen(rel) + 1;
   char *hdfsPath = (char *)palloc0(hdfsPathMaxLen);
@@ -1047,11 +1032,7 @@ OrcUpdateDescData *orcBeginUpdate(Relation rel, List *fileSplits,
   AppendOnlyEntry *aoentry =
       GetAppendOnlyEntry(RelationGetRelid(rel), SnapshotNow);
   StringInfoData option;
-  initStringInfo(&option);
-  appendStringInfoChar(&option, '{');
-  if (aoentry->compresstype)
-    appendStringInfo(&option, "%s", aoentry->compresstype);
-  appendStringInfoChar(&option, '}');
+  constructOrcFormatOptionString(&option, rel, NULL, aoentry);
 
   int hdfsPathMaxLen = AOSegmentFilePathNameLen(rel) + 1;
   char *hdfsPath = (char *)palloc0(hdfsPathMaxLen);
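
The three orcam.c call sites above replace hand-rolled JSON assembly with a single constructOrcFormatOptionString() helper whose definition is not part of this excerpt. Below is a sketch consistent with the removed inline code and with the new call sites (segfileinfo is NULL on the delete and update paths); the real helper presumably also emits the new rowindexstride, compressblocksize and bloomfilter settings:

    /* Sketch of the helper's likely shape, reconstructed from the old inline code. */
    static void
    constructOrcFormatOptionStringSketch(StringInfo option, Relation rel,
                                         ResultRelSegFileInfo *segfileinfo,
                                         AppendOnlyEntry *aoentry)
    {
        initStringInfo(option);
        appendStringInfoChar(option, '{');
        if (segfileinfo != NULL)
        {
            /* insert path: record current EOFs and the stripe size in bytes */
            appendStringInfo(option, "\"logicEof\": %" PRId64, segfileinfo->eof[0]);
            appendStringInfo(option, ", \"uncompressedEof\": %" PRId64,
                             segfileinfo->uncompressed_eof[0]);
            appendStringInfo(option, ", \"stripeSize\": %" PRId64,
                             (int64) ((StdRdOptions *) rel->rd_options)->stripesize
                                 * 1024 * 1024);
            if (aoentry->compresstype)
                appendStringInfo(option, ", %s", aoentry->compresstype);
        }
        else if (aoentry->compresstype)
        {
            /* delete/update paths: only the compression options are needed */
            appendStringInfo(option, "%s", aoentry->compresstype);
        }
        appendStringInfoChar(option, '}');
    }
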
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index a9f62ac..6c69dd7 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -789,9 +789,46 @@ RelationParseRelOptions(Relation relation, HeapTuple tuple)
 		case RELKIND_AOSEGMENTS:
 		case RELKIND_AOBLOCKDIR:
 		case RELKIND_UNCATALOGED:
+		{
+			// Check the bloomfilter option here because the tuple
+			// descriptor is not available in default_reloptions.
+			const char *const keywords[] =
+			{ "bloomfilter" };
+			const int32_t keywords_size = 1;
+			char *values[keywords_size];
+			char *bloomfilter = NULL;
+			char *key = "bloomfilter";
+
+			parseRelOptions(datum, keywords_size, keywords, values, false);
+			if (values[0] != NULL)
+			{
+				TupleDesc tup_desc = relation->rd_att;
+				int attnum = tup_desc->natts;
+				char **attribute_names = palloc0(attnum * sizeof(char*));
+				for (int i = 0; i < attnum; ++i)
+				{
+					int name_len =
+							strlen(
+									((Form_pg_attribute) (tup_desc->attrs[i]))->attname.data);
+					char *attribute = palloc0(name_len + 1);
+					strncpy(attribute, ((Form_pg_attribute )
+					(tup_desc->attrs[i]))->attname.data, name_len);
+					attribute_names[i] = attribute;
+				}
+				char *dup_val = pstrdup(values[0]);
+				char *token = strtok(dup_val, ",");
+				while (token)
+				{
+					checkPlugStorageFormatOption(&bloomfilter, key, token, true,
+							attnum, attribute_names);
+					bloomfilter = NULL;
+					token = strtok(NULL, ",");
+				}
+			}
 			options = heap_reloptions(relation->rd_rel->relkind, datum,
 									  false);
 			break;
+		}
 		case RELKIND_INDEX:
 			options = index_reloptions(relation->rd_am->amoptions, datum,
 									   false);
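
In relcache.c the bloomfilter value is treated as a comma-separated list of column names: each token is handed to checkPlugStorageFormatOption() together with the relation's attribute names, which is why the check has to wait until a TupleDesc is available. As a rough stand-in for what that per-token validation amounts to (the real work is done by checkPlugStorageFormatOption, so this helper is purely illustrative):

    /* Illustrative stand-in: reject bloomfilter columns that do not exist. */
    static void
    checkBloomfilterColumn(const char *token, int attnum, char **attribute_names)
    {
        for (int i = 0; i < attnum; ++i)
            if (strcmp(token, attribute_names[i]) == 0)
                return;
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("column \"%s\" in option \'bloomfilter\' does not exist",
                        token),
                 errOmitLocation(true)));
    }
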
diff --git a/src/include/access/orcam.h b/src/include/access/orcam.h
index 47d24c2..871fc05 100644
--- a/src/include/access/orcam.h
+++ b/src/include/access/orcam.h
@@ -26,6 +26,13 @@
 #include "cdb/cdbquerycontextdispatching.h"
 #include "nodes/relation.h"
 
+#define DEFAULT_ORC_ROW_GROUP_SIZE 65536
+#define MIN_ORC_ROW_GROUP_SIZE 1000
+#define MAX_ORC_ROW_GROUP_SIZE 1024 * 1024 * 1024
+// the ORC compress block size is specified in bytes
+#define DEFAULT_ORC_COMPRESS_BLOCK_SIZE 256 * 1024
+#define MIN_ORC_COMPRESS_BLOCK_SIZE 1
+#define MAX_ORC_COMPRESS_BLOCK_SIZE 1024 * 1024 * 1024
 // here we use orc stripe size in MBytes
 #define DEFAULT_ORC_STRIPE_SIZE 64
 #define MIN_ORC_STRIPE_SIZE 1
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index 3fbeee3..2fbd2ac 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -285,7 +285,6 @@ typedef struct StdRdOptions
 	int			blocksize;		/* max varblock size (AO rels only) */
 	int			pagesize;		/* page size(Parquet rels only) */
 	int			rowgroupsize;	/* row group size (Parquet rels only)*/
-	int     stripesize;  /* stripe size (ORC rels only) */
 	int			compresslevel;  /* compression level (AO rels only) */
 	char*		compresstype;   /* compression type (AO rels only) */
 	bool		checksum;		/* checksum (AO rels only) */
@@ -293,6 +292,10 @@ typedef struct StdRdOptions
 	bool		forceHeap;		/* specified appendonly=false */
 	bool		errorTable;		/* skip GOH tablespace checking. */
 	int 		bucket_num;		/* default init segment num for random/hash/external table */
+	char*		bloomfilter;	/* columns using bloomfilter (ORC rels only) */
+	int 		stripesize; 	/* stripe size (ORC rels only) */
+	int 		rowindexstride;	/* row index stride (ORC rels only) */
+	int 		compressblocksize;  /* compressblocksize in native orc, different from blocksize (ORC rels only) */
 } StdRdOptions;
 
 #define HEAP_MIN_FILLFACTOR			10
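
Note that the new fields use different units: stripesize stays in megabytes (the orcam.c write path multiplies it by 1024 * 1024 before handing it to the writer), compressblocksize is in bytes, and rowindexstride counts rows. A small sketch making the stripe-size conversion explicit, assuming only what is visible in this patch:

    /* Sketch: stripesize is stored in MB, so convert before passing it on. */
    static int64
    orcStripeSizeInBytes(const StdRdOptions *opts)
    {
        return (int64) opts->stripesize * 1024 * 1024;
    }
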