Posted to commits@drill.apache.org by jn...@apache.org on 2017/03/02 20:59:28 UTC

[01/27] drill git commit: Bump maxsize of jdbc-all jar to accommodate the increased size of the jar file due to new code.

Repository: drill
Updated Branches:
  refs/heads/master 3c3b08c5a -> 7ebb985ed


Bump maxsize of jdbc-all jar to accommodate the increased size of the jar file due to new code.


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/e2b52713
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/e2b52713
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/e2b52713

Branch: refs/heads/master
Commit: e2b52713d7191a9b8ce37cfafc4ece8bcc1169ca
Parents: 3c3b08c
Author: Jinfeng Ni <jn...@apache.org>
Authored: Wed Mar 1 18:01:13 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 18:07:44 2017 -0800

----------------------------------------------------------------------
 exec/jdbc-all/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/e2b52713/exec/jdbc-all/pom.xml
----------------------------------------------------------------------
diff --git a/exec/jdbc-all/pom.xml b/exec/jdbc-all/pom.xml
index 4610a92..e69c1f4 100644
--- a/exec/jdbc-all/pom.xml
+++ b/exec/jdbc-all/pom.xml
@@ -453,7 +453,7 @@
                   This is likely due to you adding new dependencies to java-exec and not updating the excludes in this module. This is important as it minimizes the size of the dependency for Drill application users.
 
                   </message>
-                  <maxsize>21000000</maxsize>
+                  <maxsize>22000000</maxsize>
                   <minsize>15000000</minsize>
                   <files>
                    <file>${project.build.directory}/drill-jdbc-all-${project.version}.jar</file>

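The <maxsize>/<minsize> pair above defines the size window the build enforces on the packaged drill-jdbc-all jar; the build fails with the message shown when the jar drifts outside it. The lower bound presumably guards against the opposite failure: required classes being accidentally excluded. As an illustration only (not part of this commit), the same guard expressed as a minimal Java sketch, with the jar path passed in as an assumed argument:

    import java.io.File;

    public class JarSizeGuard {
      public static void main(String[] args) {
        // Illustration only: mirrors the build-time window above. The path is
        // an assumption standing in for
        // ${project.build.directory}/drill-jdbc-all-<version>.jar.
        File jar = new File(args[0]);
        long max = 22_000_000L;   // new ceiling from this commit
        long min = 15_000_000L;
        long size = jar.length();
        if (size < min || size > max) {
          throw new IllegalStateException(
              "jar size " + size + " outside [" + min + ", " + max + "]");
        }
      }
    }
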

[08/27] drill git commit: DRILL-5301: Add C++ client support for Server metadata API

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/src/clientlib/metadata.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/metadata.hpp b/contrib/native/client/src/clientlib/metadata.hpp
index 0cc8987..5edb16f 100644
--- a/contrib/native/client/src/clientlib/metadata.hpp
+++ b/contrib/native/client/src/clientlib/metadata.hpp
@@ -21,6 +21,7 @@
 #define DRILL_METADATA_H
 
 #include <boost/ref.hpp>
+#include <boost/unordered_set.hpp>
 
 #include "drill/common.hpp"
 #include "drill/drillClient.hpp"
@@ -159,6 +160,25 @@ namespace meta {
 		boost::reference_wrapper<const ::exec::user::ColumnMetadata> m_pMetadata;
 	};
 
+	struct ConvertSupportHasher {
+		std::size_t operator()(const exec::user::ConvertSupport& key) const {
+			std::size_t hash = 0;
+
+			boost::hash_combine(hash, key.from());
+			boost::hash_combine(hash, key.to());
+
+			return hash;
+		}
+	};
+
+	struct ConvertSupportEqualTo {
+		bool operator()(exec::user::ConvertSupport const& cs1, exec::user::ConvertSupport const& cs2) const {
+			return cs1.from() == cs2.from() && cs1.to() == cs2.to();
+		}
+	};
+
+	typedef boost::unordered_set<exec::user::ConvertSupport, ConvertSupportHasher, ConvertSupportEqualTo> convert_support_set;
+
     class DrillMetadata: public Metadata {
     public:
         static const std::string s_connectorName; 
@@ -167,21 +187,10 @@ namespace meta {
         static const std::string s_serverName;
         static const std::string s_serverVersion;
 
-        static const std::string s_catalogSeparator;
-        static const std::string s_catalogTerm;
-
-        static const std::string s_identifierQuoteString;
-        static const std::vector<std::string> s_sqlKeywords;
-        static const std::vector<std::string> s_numericFunctions;
-        static const std::string s_schemaTerm;
-        static const std::string s_searchEscapeString;
-        static const std::string s_specialCharacters;
-        static const std::vector<std::string> s_stringFunctions;
-        static const std::vector<std::string> s_systemFunctions;
-        static const std::string s_tableTerm;
-        static const std::vector<std::string> s_dateTimeFunctions;
-
-        DrillMetadata(DrillClientImpl& client): Metadata(), m_client(client) {}
+        // Default server meta, to be used as a fallback if the server cannot be queried
+        static const exec::user::ServerMeta s_defaultServerMeta;
+
+        DrillMetadata(DrillClientImpl& client, const exec::user::ServerMeta&  serverMeta);
         ~DrillMetadata() {}
 
         DrillClientImpl& client() { return m_client; }
@@ -203,85 +212,111 @@ namespace meta {
         status_t getTables(const std::string& catalogPattern, const std::string& schemaPattern, const std::string& tablePattern, const std::vector<std::string>* tableTypes, Metadata::pfnTableMetadataListener listener, void* listenerCtx, QueryHandle_t* qHandle);
         status_t getColumns(const std::string& catalogPattern, const std::string& schemaPattern, const std:: string& tablePattern, const std::string& columnPattern, Metadata::pfnColumnMetadataListener listener, void* listenerCtx, QueryHandle_t* qHandle);
 
-        bool areAllTableSelectable() const { return false; }
-        bool isCatalogAtStart() const { return true; }
-        const std::string& getCatalogSeparator() const { return s_catalogSeparator; }
-        const std::string& getCatalogTerm() const { return s_catalogTerm; }
-        bool isColumnAliasingSupported() const { return true; }
-        bool isNullPlusNonNullNull() const { return true; }
+        bool areAllTableSelectable() const { return m_allTablesSelectable; }
+        bool isCatalogAtStart() const { return m_catalogAtStart; }
+        const std::string& getCatalogSeparator() const { return m_catalogSeparator; }
+        const std::string& getCatalogTerm() const { return m_catalogTerm; }
+        bool isColumnAliasingSupported() const { return m_columnAliasingSupported; }
+        bool isNullPlusNonNullNull() const { return m_nullPlusNonNullEqualsNull; }
         bool isConvertSupported(common::MinorType from, common::MinorType to) const;
-        meta::CorrelationNamesSupport getCorrelationNames() const { return meta::CN_ANY_NAMES; }
-        bool isReadOnly() const { return false; }
-        meta::DateTimeLiteralSupport getDateTimeLiteralsSupport() const {
-            return DL_DATE
-                | DL_TIME
-                | DL_TIMESTAMP
-                | DL_INTERVAL_YEAR
-                | DL_INTERVAL_MONTH
-                | DL_INTERVAL_DAY
-                | DL_INTERVAL_HOUR
-                | DL_INTERVAL_MINUTE
-                | DL_INTERVAL_SECOND
-                | DL_INTERVAL_YEAR_TO_MONTH
-                | DL_INTERVAL_DAY_TO_HOUR
-                | DL_INTERVAL_DAY_TO_MINUTE
-                | DL_INTERVAL_DAY_TO_SECOND
-                | DL_INTERVAL_HOUR_TO_MINUTE
-                | DL_INTERVAL_HOUR_TO_SECOND
-                | DL_INTERVAL_MINUTE_TO_SECOND;
-        }
-
-        meta::CollateSupport getCollateSupport() const { return meta::C_NONE; }// supported?
-        meta::GroupBySupport getGroupBySupport() const { return meta::GB_UNRELATED; }
-        meta::IdentifierCase getIdentifierCase() const { return meta::IC_STORES_UPPER; } // to check?
-
-        const std::string& getIdentifierQuoteString() const { return s_identifierQuoteString; }
-        const std::vector<std::string>& getSQLKeywords() const { return s_sqlKeywords; }
-        bool isLikeEscapeClauseSupported() const { return true; }
-        std::size_t getMaxBinaryLiteralLength() const { return 0; }
-        std::size_t getMaxCatalogNameLength() const { return 0; }
-        std::size_t getMaxCharLiteralLength() const { return 0; }
-        std::size_t getMaxColumnNameLength() const { return 0; }
-        std::size_t getMaxColumnsInGroupBy() const { return 0; }
-        std::size_t getMaxColumnsInOrderBy() const { return 0; }
-        std::size_t getMaxColumnsInSelect() const { return 0; }
-        std::size_t getMaxCursorNameLength() const { return 0; }
-        std::size_t getMaxLogicalLobSize() const { return 0; }
-        std::size_t getMaxStatements() const { return 0; }
-        std::size_t getMaxRowSize() const { return 0; }
-        bool isBlobIncludedInMaxRowSize() const { return true; }
-        std::size_t getMaxSchemaNameLength() const { return 0; }
-        std::size_t getMaxStatementLength() const { return 0; }
-        std::size_t getMaxTableNameLength() const { return 0; }
-        std::size_t getMaxTablesInSelect() const { return 0; }
-        std::size_t getMaxUserNameLength() const { return 0; }
-        meta::NullCollation getNullCollation() const { return meta::NC_AT_END; }
-        const std::vector<std::string>& getNumericFunctions() const { return s_numericFunctions; }
-        meta::OuterJoinSupport getOuterJoinSupport() const { return meta::OJ_LEFT 
-            | meta::OJ_RIGHT 
-            | meta::OJ_FULL;
-        }
-        bool isUnrelatedColumnsInOrderBySupported() const { return true; }
-        meta::QuotedIdentifierCase getQuotedIdentifierCase() const { return meta::QIC_SUPPORTS_MIXED; }
-        const std::string& getSchemaTerm() const { return s_schemaTerm; }
-        const std::string& getSearchEscapeString() const { return s_searchEscapeString; }
-        const std::string& getSpecialCharacters() const { return s_specialCharacters; }
-        const std::vector<std::string>& getStringFunctions() const { return s_stringFunctions; }
-        meta::SubQuerySupport getSubQuerySupport() const { return SQ_CORRELATED
-                | SQ_IN_COMPARISON
-                | SQ_IN_EXISTS
-                | SQ_IN_QUANTIFIED;
-        }
-        const std::vector<std::string>& getSystemFunctions() const { return s_systemFunctions; }
-        const std::string& getTableTerm() const { return s_tableTerm; }
-        const std::vector<std::string>& getDateTimeFunctions() const { return s_dateTimeFunctions; }
-        bool isTransactionSupported() const { return false; }
-        meta::UnionSupport getUnionSupport() const { return meta::U_UNION | meta::U_UNION_ALL; }
-        bool isSelectForUpdateSupported() const { return false; }
+        meta::CorrelationNamesSupport getCorrelationNames() const { return m_correlationNamesSupport; }
+        bool isReadOnly() const { return m_readOnly; }
+        meta::DateTimeLiteralSupport getDateTimeLiteralsSupport() const { return m_dateTimeLiteralsSupport; }
+
+        meta::CollateSupport getCollateSupport() const { return m_collateSupport; }
+        meta::GroupBySupport getGroupBySupport() const { return m_groupBySupport; }
+        meta::IdentifierCase getIdentifierCase() const { return m_identifierCase; }
+
+        const std::string& getIdentifierQuoteString() const { return m_identifierQuoteString; }
+        const std::vector<std::string>& getSQLKeywords() const { return m_sqlKeywords; }
+        bool isLikeEscapeClauseSupported() const { return m_likeEscapeClauseSupported; }
+        std::size_t getMaxBinaryLiteralLength() const { return m_maxBinaryLiteralLength; }
+        std::size_t getMaxCatalogNameLength() const { return m_maxCatalogNameLength; }
+        std::size_t getMaxCharLiteralLength() const { return m_maxCharLIteralLength; }
+        std::size_t getMaxColumnNameLength() const { return m_maxColumnNameLength; }
+        std::size_t getMaxColumnsInGroupBy() const { return m_maxColumnsInGroupBy; }
+        std::size_t getMaxColumnsInOrderBy() const { return m_maxColumnsInOrderBy; }
+        std::size_t getMaxColumnsInSelect() const { return m_maxColumnsInSelect; }
+        std::size_t getMaxCursorNameLength() const { return m_maxCursorNameLength; }
+        std::size_t getMaxLogicalLobSize() const { return m_maxLogicalLobSize; }
+        std::size_t getMaxStatements() const { return m_maxStatements; }
+        std::size_t getMaxRowSize() const { return m_maxRowSize; }
+        bool isBlobIncludedInMaxRowSize() const { return m_blobIncludedInMaxRowSize; }
+        std::size_t getMaxSchemaNameLength() const { return m_maxSchemaNameLength; }
+        std::size_t getMaxStatementLength() const { return m_maxStatementLength; }
+        std::size_t getMaxTableNameLength() const { return m_maxTableNameLength; }
+        std::size_t getMaxTablesInSelect() const { return m_maxTablesInSelectLength; }
+        std::size_t getMaxUserNameLength() const { return m_maxUserNameLength; }
+        meta::NullCollation getNullCollation() const { return m_nullCollation; }
+        const std::vector<std::string>& getNumericFunctions() const { return m_numericFunctions; }
+        meta::OuterJoinSupport getOuterJoinSupport() const { return m_outerJoinSupport; }
+        bool isUnrelatedColumnsInOrderBySupported() const { return m_unrelatedColumnsInOrderBySupported; }
+        meta::QuotedIdentifierCase getQuotedIdentifierCase() const { return m_quotedIdentifierCase; }
+        const std::string& getSchemaTerm() const { return m_schemaTerm; }
+        const std::string& getSearchEscapeString() const { return m_searchEscapeString; }
+        const std::string& getSpecialCharacters() const { return m_specialCharacters; }
+        const std::vector<std::string>& getStringFunctions() const { return m_stringFunctions; }
+        meta::SubQuerySupport getSubQuerySupport() const { return m_subQuerySupport; }
+        const std::vector<std::string>& getSystemFunctions() const { return m_systemFunctions; }
+        const std::string& getTableTerm() const { return m_tableTerm; }
+        const std::vector<std::string>& getDateTimeFunctions() const { return m_dateTimeFunctions; }
+        bool isTransactionSupported() const { return m_transactionSupported; }
+        meta::UnionSupport getUnionSupport() const { return m_unionSupport; }
+        bool isSelectForUpdateSupported() const { return m_selectForUpdateSupported; }
 
     private:
         DrillClientImpl& m_client;
-    };
+
+		bool m_allTablesSelectable;
+		bool m_blobIncludedInMaxRowSize;
+		bool m_catalogAtStart;
+        std::string m_catalogSeparator;
+        std::string m_catalogTerm;
+		Drill::meta::CollateSupport m_collateSupport;
+		bool m_columnAliasingSupported;
+		Drill::meta::CorrelationNamesSupport m_correlationNamesSupport;
+		convert_support_set m_convertSupport;
+        std::vector<std::string> m_dateTimeFunctions;
+		Drill::meta::DateTimeLiteralSupport m_dateTimeLiteralsSupport;
+		Drill::meta::GroupBySupport m_groupBySupport;
+		Drill::meta::IdentifierCase m_identifierCase;
+        std::string m_identifierQuoteString;
+		bool m_likeEscapeClauseSupported;
+		std::size_t m_maxBinaryLiteralLength;
+		std::size_t m_maxCatalogNameLength;
+		std::size_t m_maxCharLIteralLength;
+		std::size_t m_maxColumnNameLength;
+		std::size_t m_maxColumnsInGroupBy;
+		std::size_t m_maxColumnsInOrderBy;
+		std::size_t m_maxColumnsInSelect;
+		std::size_t m_maxCursorNameLength;
+		std::size_t m_maxLogicalLobSize;
+		std::size_t m_maxRowSize;
+		std::size_t m_maxSchemaNameLength;
+		std::size_t m_maxStatementLength;
+		std::size_t m_maxStatements;
+		std::size_t m_maxTableNameLength;
+		std::size_t m_maxTablesInSelectLength;
+		std::size_t m_maxUserNameLength;
+		Drill::meta::NullCollation m_nullCollation;
+		bool m_nullPlusNonNullEqualsNull;
+        std::vector<std::string> m_numericFunctions;
+		Drill::meta::OuterJoinSupport m_outerJoinSupport;
+		Drill::meta::QuotedIdentifierCase m_quotedIdentifierCase;
+		bool m_readOnly;
+        std::string m_schemaTerm;
+        std::string m_searchEscapeString;
+		bool m_selectForUpdateSupported;
+        std::string m_specialCharacters;
+        std::vector<std::string> m_sqlKeywords;
+        std::vector<std::string> m_stringFunctions;
+		Drill::meta::SubQuerySupport m_subQuerySupport;
+        std::vector<std::string> m_systemFunctions;
+        std::string m_tableTerm;
+		bool m_transactionSupported;
+		Drill::meta::UnionSupport m_unionSupport;
+		bool m_unrelatedColumnsInOrderBySupported;
+};
 } // namespace meta
 } // namespace Drill
 

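The ConvertSupportHasher and ConvertSupportEqualTo functors added above let a boost::unordered_set key ConvertSupport entries by their (from, to) type pair, so the client's isConvertSupported() check becomes a constant-time set lookup over the server-reported conversions. A rough Java analogue of the same technique, for illustration only (ConvertKey and the integer type codes are hypothetical stand-ins, not Drill APIs):

    import java.util.HashSet;
    import java.util.Objects;
    import java.util.Set;

    // Illustration only: a set key that hashes and compares on its
    // (from, to) pair, mirroring ConvertSupportHasher/ConvertSupportEqualTo.
    public class ConvertSupportDemo {
      static final class ConvertKey {
        final int from;   // stands in for common::MinorType
        final int to;
        ConvertKey(int from, int to) { this.from = from; this.to = to; }
        @Override public boolean equals(Object o) {
          if (!(o instanceof ConvertKey)) { return false; }
          ConvertKey k = (ConvertKey) o;
          return from == k.from && to == k.to;
        }
        @Override public int hashCode() { return Objects.hash(from, to); }
      }

      public static void main(String[] args) {
        Set<ConvertKey> supported = new HashSet<>();
        supported.add(new ConvertKey(1, 2));                            // assumed type codes
        System.out.println(supported.contains(new ConvertKey(1, 2)));  // true
        System.out.println(supported.contains(new ConvertKey(2, 1)));  // false
      }
    }
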
http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/src/include/drill/drillClient.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/drillClient.hpp b/contrib/native/client/src/include/drill/drillClient.hpp
index 01c9f67..1eb97cd 100644
--- a/contrib/native/client/src/include/drill/drillClient.hpp
+++ b/contrib/native/client/src/include/drill/drillClient.hpp
@@ -439,24 +439,26 @@ namespace meta {
   * Identifier case support
    */
   enum IdentifierCase {
-      IC_STORES_LOWER,  /**< Mixed case unquoted SQL identifier are treated as
-	  	  	  	  	  	    case insensitive and stored in lower case */
-      IC_STORES_MIXED,  /**< Mixed case unquoted SQL identifier are treated as
-	  	  	  	  	  	    case insensitive and stored in mixed case */
-      IC_STORES_UPPER,  /**< Mixed case unquoted SQL identifier are treated as
-	  	  	  	  	  	    case insensitive and stored in upper case */
-      IC_SUPPORTS_MIXED /**< Mixed case unquoted SQL identifier are treated as
-	  	  	  	  	  	    case sensitive and stored in mixed case */
+	  IC_UNKNOWN      = -1, /**< Unknown support */
+      IC_STORES_LOWER = 0,  /**< Mixed case unquoted SQL identifier are treated as
+	  	  	  	  	  	         case insensitive and stored in lower case */
+      IC_STORES_MIXED = 1,  /**< Mixed case unquoted SQL identifier are treated as
+	  	  	  	  	  	    	 case insensitive and stored in mixed case */
+      IC_STORES_UPPER = 2,  /**< Mixed case unquoted SQL identifier are treated as
+	  	  	  	  	  	    	 case insensitive and stored in upper case */
+      IC_SUPPORTS_MIXED =3  /**< Mixed case unquoted SQL identifier are treated as
+	  	  	  	  	  	     	 case sensitive and stored in mixed case */
   };
 
   /**
    * Null collation support
    */
   enum NullCollation {
-      NC_AT_START,/**< NULL values are sorted at the start regardless of the order*/
-      NC_AT_END,  /**< NULL values are sorted at the end regardless of the order*/
-      NC_HIGH,    /**< NULL is the highest value */
-      NC_LOW      /**< NULL is the lowest value */
+	  NC_UNKNOWN = -1,  /**< Unknown support */
+      NC_AT_START = 0,	/**< NULL values are sorted at the start regardless of the order*/
+      NC_AT_END   = 1,  /**< NULL values are sorted at the end regardless of the order*/
+      NC_HIGH     = 2,  /**< NULL is the highest value */
+      NC_LOW      = 3	/**< NULL is the lowest value */
   };
 
 
@@ -516,14 +518,15 @@ namespace meta {
   * Quoted identifier case support
    */
   enum QuotedIdentifierCase {
-      QIC_STORES_LOWER,  /**< Mixed case quoted SQL identifier are treated as
-	  	  	  	  	  	    case insensitive and stored in lower case */
-      QIC_STORES_MIXED,  /**< Mixed case quoted SQL identifier are treated as
-	  	  	  	  	  	    case insensitive and stored in mixed case */
-      QIC_STORES_UPPER,  /**< Mixed case quoted SQL identifier are treated as
-	  	  	  	  	  	    case insensitive and stored in upper case */
-      QIC_SUPPORTS_MIXED /**< Mixed case quoted SQL identifier are treated as
-	  	  	  	  	  	    case sensitive and stored in mixed case */
+	  QIC_UNKNOWN = -1,		 /**< Unknown support */
+      QIC_STORES_LOWER = 0,	 /**< Mixed case quoted SQL identifier are treated as
+	  	  	  	  	  	         case insensitive and stored in lower case */
+      QIC_STORES_MIXED = 1,  /**< Mixed case quoted SQL identifier are treated as
+	  	  	  	  	  	          case insensitive and stored in mixed case */
+      QIC_STORES_UPPER = 2,  /**< Mixed case quoted SQL identifier are treated as
+	  	  	  	  	  	          case insensitive and stored in upper case */
+      QIC_SUPPORTS_MIXED =3  /**< Mixed case quoted SQL identifier are treated as
+	  	  	  	  	  	          case sensitive and stored in mixed case */
   };
 
   /*

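A pattern worth noting in the enum changes above: each enum now pins explicit wire values and gains an *_UNKNOWN = -1 member, so a client that receives an unrecognized value from a newer server can degrade to "unknown" rather than misinterpret it. A minimal Java sketch of that decoding pattern (illustrative only; fromValue is an assumed helper, not part of the Drill client API):

    // Illustration only: decode a raw wire value into an enum, defaulting
    // to UNKNOWN for values this client does not know about.
    enum NullCollation {
      NC_UNKNOWN(-1), NC_AT_START(0), NC_AT_END(1), NC_HIGH(2), NC_LOW(3);

      private final int value;
      NullCollation(int value) { this.value = value; }

      static NullCollation fromValue(int raw) {
        for (NullCollation nc : values()) {
          if (nc.value == raw) { return nc; }
        }
        return NC_UNKNOWN;   // unrecognized wire value: degrade gracefully
      }
    }
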

[16/27] drill git commit: DRILL-5258: Access mock data definition from SQL

Posted by jn...@apache.org.
DRILL-5258: Access mock data definition from SQL

Extends the mock data source so that its full power can be used from an
SQL query by referencing the JSON definition file. See the JIRA and
package-info for details.

Adds a boolean data generator and a varying-length string generator.

Adds "mock" table stats for use in the planner.

Revisions based on code review comments

close #752


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/974c6134
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/974c6134
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/974c6134

Branch: refs/heads/master
Commit: 974c613402604b86f9d36568b4b62c22a7a291d9
Parents: 20a374c
Author: Paul Rogers <pr...@maprtech.com>
Authored: Tue Feb 14 10:02:13 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:33 2017 -0800

----------------------------------------------------------------------
 .../drill/exec/physical/impl/ScanBatch.java     |   6 +-
 .../apache/drill/exec/record/SchemaUtil.java    |  23 +-
 .../apache/drill/exec/record/TypedFieldId.java  |  13 ++
 .../drill/exec/record/VectorContainer.java      |   9 +-
 .../drill/exec/store/AbstractStoragePlugin.java |  14 +-
 .../exec/store/dfs/easy/EasyGroupScan.java      |   3 +-
 .../drill/exec/store/mock/BooleanGen.java       |  42 ++++
 .../apache/drill/exec/store/mock/ColumnDef.java |  19 +-
 .../apache/drill/exec/store/mock/DateGen.java   |   2 +-
 .../store/mock/ExtendedMockRecordReader.java    |  24 +-
 .../drill/exec/store/mock/MockGroupScanPOP.java | 232 +++++--------------
 .../drill/exec/store/mock/MockRecordReader.java |   4 +-
 .../exec/store/mock/MockScanBatchCreator.java   |   7 +-
 .../exec/store/mock/MockStorageEngine.java      |  92 ++++++--
 .../drill/exec/store/mock/MockSubScanPOP.java   |  11 +-
 .../drill/exec/store/mock/MockTableDef.java     | 213 +++++++++++++++++
 .../drill/exec/store/mock/VaryingStringGen.java |  70 ++++++
 .../drill/exec/store/mock/package-info.java     |  41 +++-
 .../apache/drill/exec/util/TestUtilities.java   |   6 +-
 .../fn/interp/ExpressionInterpreterTest.java    |  15 +-
 .../physical/impl/TestConvertFunctions.java     |   5 +-
 .../impl/mergereceiver/TestMergingReceiver.java |   4 +
 .../src/test/resources/test/example-mock.json   |  16 ++
 23 files changed, 597 insertions(+), 274 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
index ad82668..e20c394 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
@@ -230,7 +230,7 @@ public class ScanBatch implements CloseableRecordBatch {
       hasReadNonEmptyFile = true;
       populateImplicitVectors();
 
-      for (VectorWrapper w : container) {
+      for (VectorWrapper<?> w : container) {
         w.getValueVector().getMutator().setValueCount(recordCount);
       }
 
@@ -270,6 +270,7 @@ public class ScanBatch implements CloseableRecordBatch {
       if (implicitValues != null) {
         for (String column : implicitValues.keySet()) {
           final MaterializedField field = MaterializedField.create(column, Types.optional(MinorType.VARCHAR));
+          @SuppressWarnings("resource")
           final ValueVector v = mutator.addField(field, NullableVarCharVector.class);
           implicitVectors.put(column, v);
         }
@@ -282,6 +283,7 @@ public class ScanBatch implements CloseableRecordBatch {
   private void populateImplicitVectors() {
     if (implicitValues != null) {
       for (Map.Entry<String, String> entry : implicitValues.entrySet()) {
+        @SuppressWarnings("resource")
         final NullableVarCharVector v = (NullableVarCharVector) implicitVectors.get(entry.getKey());
         String val;
         if ((val = entry.getValue()) != null) {
@@ -325,7 +327,7 @@ public class ScanBatch implements CloseableRecordBatch {
     private boolean schemaChanged = true;
 
 
-    @SuppressWarnings("unchecked")
+    @SuppressWarnings("resource")
     @Override
     public <T extends ValueVector> T addField(MaterializedField field,
                                               Class<T> clazz) throws SchemaChangeException {

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaUtil.java
index d6a8a40..2fc9314 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaUtil.java
@@ -17,26 +17,24 @@
  */
 package org.apache.drill.exec.record;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.types.TypeProtos.DataMode;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.memory.BufferAllocator;
 import org.apache.drill.exec.ops.OperatorContext;
-import org.apache.drill.exec.physical.impl.sort.RecordBatchData;
-import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.exec.vector.complex.UnionVector;
 
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 
 /**
  * Utility class for dealing with changing schemas
@@ -96,6 +94,7 @@ public class SchemaUtil {
     return s;
   }
 
+  @SuppressWarnings("resource")
   private static  ValueVector coerceVector(ValueVector v, VectorContainer c, MaterializedField field,
                                            int recordCount, OperatorContext context) {
     if (v != null) {
@@ -154,13 +153,14 @@ public class SchemaUtil {
     int recordCount = in.getRecordCount();
     boolean isHyper = false;
     Map<String, Object> vectorMap = Maps.newHashMap();
-    for (VectorWrapper w : in) {
+    for (VectorWrapper<?> w : in) {
       if (w.isHyper()) {
         isHyper = true;
         final ValueVector[] vvs = w.getValueVectors();
         vectorMap.put(vvs[0].getField().getPath(), vvs);
       } else {
         assert !isHyper;
+        @SuppressWarnings("resource")
         final ValueVector v = w.getValueVector();
         vectorMap.put(v.getField().getPath(), v);
       }
@@ -183,6 +183,7 @@ public class SchemaUtil {
         }
         c.add(vvsOut);
       } else {
+        @SuppressWarnings("resource")
         final ValueVector v = (ValueVector) vectorMap.remove(field.getPath());
         c.add(coerceVector(v, c, field, recordCount, context));
       }

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/record/TypedFieldId.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/TypedFieldId.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/TypedFieldId.java
index a322f72..615c7a2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/TypedFieldId.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/TypedFieldId.java
@@ -28,6 +28,12 @@ import org.apache.drill.exec.vector.ValueVector;
 import com.carrotsearch.hppc.IntArrayList;
 import com.google.common.base.Preconditions;
 
+/**
+ * Declares a value vector field, providing metadata about the field.
+ * Drives code generation by providing type and other structural
+ * information that determine code structure.
+ */
+
 public class TypedFieldId {
   final MajorType finalType;
   final MajorType secondaryFinal;
@@ -104,6 +110,13 @@ public class TypedFieldId {
     return intermediateType;
   }
 
+  /**
+   * Return the class for the value vector (type, mode).
+   *
+   * @return the specific, generated ValueVector subclass that
+   * stores values of the given (type, mode) combination
+   */
+
   public Class<? extends ValueVector> getIntermediateClass() {
     return (Class<? extends ValueVector>) BasicTypeHelper.getValueVectorClass(intermediateType.getMinorType(),
         intermediateType.getMode());

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/record/VectorContainer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/VectorContainer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/VectorContainer.java
index 96d9ba6..ceedb84 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/VectorContainer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/VectorContainer.java
@@ -28,7 +28,6 @@ import java.util.Set;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.memory.BufferAllocator;
 import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.exec.record.selection.SelectionVector2;
@@ -117,6 +116,7 @@ public class VectorContainer implements Iterable<VectorWrapper<?>>, VectorAccess
     return addOrGet(field, null);
   }
 
+  @SuppressWarnings({ "resource", "unchecked" })
   public <T extends ValueVector> T addOrGet(final MaterializedField field, final SchemaChangeCallBack callBack) {
     final TypedFieldId id = getValueVectorId(SchemaPath.getSimplePath(field.getPath()));
     final ValueVector vector;
@@ -159,10 +159,10 @@ public class VectorContainer implements Iterable<VectorWrapper<?>>, VectorAccess
     return vc;
   }
 
-  public static VectorContainer getTransferClone(VectorAccessible incoming, VectorWrapper[] ignoreWrappers, OperatorContext oContext) {
+  public static VectorContainer getTransferClone(VectorAccessible incoming, VectorWrapper<?>[] ignoreWrappers, OperatorContext oContext) {
     Iterable<VectorWrapper<?>> wrappers = incoming;
     if (ignoreWrappers != null) {
-      final List<VectorWrapper> ignored = Lists.newArrayList(ignoreWrappers);
+      final List<VectorWrapper<?>> ignored = Lists.newArrayList(ignoreWrappers);
       final Set<VectorWrapper<?>> resultant = Sets.newLinkedHashSet(incoming);
       resultant.removeAll(ignored);
       wrappers = resultant;
@@ -184,6 +184,7 @@ public class VectorContainer implements Iterable<VectorWrapper<?>>, VectorAccess
     List<VectorWrapper<?>> canonicalWrappers = new ArrayList<VectorWrapper<?>>(original.wrappers);
     // Sort list of VectorWrapper alphabetically based on SchemaPath.
     Collections.sort(canonicalWrappers, new Comparator<VectorWrapper<?>>() {
+      @Override
       public int compare(VectorWrapper<?> v1, VectorWrapper<?> v2) {
         return v1.getField().getPath().compareTo(v2.getField().getPath());
       }
@@ -265,6 +266,7 @@ public class VectorContainer implements Iterable<VectorWrapper<?>>, VectorAccess
     throw new IllegalStateException("You attempted to remove a vector that didn't exist.");
   }
 
+  @Override
   public TypedFieldId getValueVectorId(SchemaPath path) {
     for (int i = 0; i < wrappers.size(); i++) {
       VectorWrapper<?> va = wrappers.get(i);
@@ -310,6 +312,7 @@ public class VectorContainer implements Iterable<VectorWrapper<?>>, VectorAccess
     return schema != null;
   }
 
+  @Override
   public BatchSchema getSchema() {
     Preconditions
         .checkNotNull(schema,

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
index fa2c450..1bd56ae 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
@@ -33,11 +33,9 @@ import com.google.common.collect.ImmutableSet;
 /** Abstract class for StorePlugin implementations.
  * See StoragePlugin for description of the interface intent and its methods.
  */
-public abstract class AbstractStoragePlugin implements StoragePlugin{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(AbstractStoragePlugin.class);
+public abstract class AbstractStoragePlugin implements StoragePlugin {
 
-  protected AbstractStoragePlugin(){
-  }
+  protected AbstractStoragePlugin() { }
 
   @Override
   public boolean supportsRead() {
@@ -95,7 +93,6 @@ public abstract class AbstractStoragePlugin implements StoragePlugin{
     default:
       return ImmutableSet.of();
     }
-
   }
 
   @Override
@@ -109,11 +106,8 @@ public abstract class AbstractStoragePlugin implements StoragePlugin{
   }
 
   @Override
-  public void start() throws IOException {
-  }
+  public void start() throws IOException { }
 
   @Override
-  public void close() throws Exception {
-  }
-
+  public void close() throws Exception { }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java
index 7a80db3..d60b753 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java
@@ -54,7 +54,7 @@ import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
 
 @JsonTypeName("fs-scan")
-public class EasyGroupScan extends AbstractFileGroupScan{
+public class EasyGroupScan extends AbstractFileGroupScan {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(EasyGroupScan.class);
 
   private FileSelection selection;
@@ -127,6 +127,7 @@ public class EasyGroupScan extends AbstractFileGroupScan{
   }
 
   private void initFromSelection(FileSelection selection, EasyFormatPlugin<?> formatPlugin) throws IOException {
+    @SuppressWarnings("resource")
     final DrillFileSystem dfs = ImpersonationUtil.createFileSystem(getUserName(), formatPlugin.getFsConf());
     this.selection = selection;
     BlockMapBuilder b = new BlockMapBuilder(dfs, formatPlugin.getContext().getBits());

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/BooleanGen.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/BooleanGen.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/BooleanGen.java
new file mode 100644
index 0000000..dd84f4d
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/BooleanGen.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.mock;
+
+import java.util.Random;
+
+import org.apache.drill.exec.vector.BitVector;
+import org.apache.drill.exec.vector.ValueVector;
+
+public class BooleanGen implements FieldGen {
+
+  private Random rand = new Random();
+
+  @Override
+  public void setup(ColumnDef colDef) { }
+
+  public int value() {
+    return rand.nextBoolean() ? 1 : 0;
+  }
+
+  @Override
+  public void setValue(ValueVector v, int index ) {
+    BitVector vector = (BitVector) v;
+    vector.getMutator().set(index, value());
+  }
+
+}

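BooleanGen above follows the mock data source's pluggable-generator contract: setup() receives the column definition and setValue() writes one generated value into the column's value vector. Per the MockColumn documentation elsewhere in this commit, a column definition can name any such generator class. As a sketch of how a custom generator would look (SmallIntGen is hypothetical, not part of this commit):

    package org.apache.drill.exec.store.mock;

    import java.util.Random;

    import org.apache.drill.exec.vector.IntVector;
    import org.apache.drill.exec.vector.ValueVector;

    // Hypothetical example generator, not part of this commit: produces
    // random ints in [0, 100), following the same FieldGen contract as
    // BooleanGen above.
    public class SmallIntGen implements FieldGen {

      private final Random rand = new Random();

      @Override
      public void setup(ColumnDef colDef) { }

      @Override
      public void setValue(ValueVector v, int index) {
        IntVector vector = (IntVector) v;
        vector.getMutator().set(index, rand.nextInt(100));
      }
    }
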
http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ColumnDef.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ColumnDef.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ColumnDef.java
index cfaacdd..2300990 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ColumnDef.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ColumnDef.java
@@ -19,7 +19,7 @@ package org.apache.drill.exec.store.mock;
 
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP.MockColumn;
+import org.apache.drill.exec.store.mock.MockTableDef.MockColumn;
 
 /**
  * Defines a column for the "enhanced" version of the mock data
@@ -37,7 +37,12 @@ public class ColumnDef {
   public ColumnDef(MockColumn mockCol) {
     this.mockCol = mockCol;
     name = mockCol.getName();
-    width = TypeHelper.getSize(mockCol.getMajorType());
+    if (mockCol.getMinorType() == MinorType.VARCHAR &&
+        mockCol.getWidth() > 0) {
+      width = mockCol.getWidth();
+    } else {
+      width = TypeHelper.getSize(mockCol.getMajorType());
+    }
     makeGenerator();
   }
 
@@ -78,6 +83,7 @@ public class ColumnDef {
     case BIGINT:
       break;
     case BIT:
+      generator = new BooleanGen();
       break;
     case DATE:
       break;
@@ -168,11 +174,6 @@ public class ColumnDef {
     name += Integer.toString(rep);
   }
 
-  public MockColumn getConfig() {
-    return mockCol;
-  }
-
-  public String getName() {
-    return name;
-  }
+  public MockColumn getConfig() { return mockCol; }
+  public String getName() { return name; }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/DateGen.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/DateGen.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/DateGen.java
index f7d53ed..100d427 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/DateGen.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/DateGen.java
@@ -25,7 +25,7 @@ import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.exec.vector.VarCharVector;
 
 /**
- * Very simple date vaue generator that produces ISO dates
+ * Very simple date value generator that produces ISO dates
  * uniformly distributed over the last year. ISO format
  * is: 2016-12-07.
  * <p>

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ExtendedMockRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ExtendedMockRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ExtendedMockRecordReader.java
index f3804d4..ac9cb6a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ExtendedMockRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/ExtendedMockRecordReader.java
@@ -31,10 +31,11 @@ import org.apache.drill.exec.expr.TypeHelper;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.physical.impl.ScanBatch;
 import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.store.AbstractRecordReader;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP.MockColumn;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP.MockScanEntry;
+import org.apache.drill.exec.store.mock.MockTableDef.MockColumn;
+import org.apache.drill.exec.store.mock.MockTableDef.MockScanEntry;
 import org.apache.drill.exec.vector.AllocationHelper;
 import org.apache.drill.exec.vector.ValueVector;
 
@@ -55,11 +56,9 @@ public class ExtendedMockRecordReader extends AbstractRecordReader {
   private int recordsRead;
 
   private final MockScanEntry config;
-  private final FragmentContext context;
   private final ColumnDef fields[];
 
   public ExtendedMockRecordReader(FragmentContext context, MockScanEntry config) {
-    this.context = context;
     this.config = config;
 
     fields = buildColumnDefs();
@@ -76,7 +75,7 @@ public class ExtendedMockRecordReader extends AbstractRecordReader {
     Set<String> names = new HashSet<>();
     MockColumn cols[] = config.getTypes();
     for (int i = 0; i < cols.length; i++) {
-      MockColumn col = cols[i];
+      MockTableDef.MockColumn col = cols[i];
       if (names.contains(col.name)) {
         throw new IllegalArgumentException("Duplicate column name: " + col.name);
       }
@@ -95,10 +94,10 @@ public class ExtendedMockRecordReader extends AbstractRecordReader {
     return defArray;
   }
 
-  private int getEstimatedRecordSize(MockColumn[] types) {
+  private int getEstimatedRecordSize() {
     int size = 0;
     for (int i = 0; i < fields.length; i++) {
-      size += TypeHelper.getSize(fields[i].getConfig().getMajorType());
+      size += fields[i].width;
     }
     return size;
   }
@@ -106,9 +105,14 @@ public class ExtendedMockRecordReader extends AbstractRecordReader {
   @Override
   public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
     try {
-      final int estimateRowSize = getEstimatedRecordSize(config.getTypes());
-      valueVectors = new ValueVector[config.getTypes().length];
-      batchRecordCount = 250000 / estimateRowSize;
+      final int estimateRowSize = getEstimatedRecordSize();
+      valueVectors = new ValueVector[fields.length];
+      int batchSize = config.getBatchSize();
+      if (batchSize == 0) {
+        batchSize = 10 * 1024 * 1024;
+      }
+      batchRecordCount = Math.max(1, batchSize / estimateRowSize);
+      batchRecordCount = Math.min(batchRecordCount, Character.MAX_VALUE);
 
       for (int i = 0; i < fields.length; i++) {
         final ColumnDef col = fields[i];

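The new setup() above derives the batch row count from a byte budget instead of the old hard-coded 250000-byte figure: the configured batch size (defaulting to 10 MiB when unset) is divided by the estimated row width, then clamped to the range [1, Character.MAX_VALUE]. A self-contained sketch of that arithmetic, with an assumed row width:

    // Illustrative arithmetic only; the row width is an assumption.
    public class BatchSizingDemo {
      public static void main(String[] args) {
        int batchSize = 10 * 1024 * 1024;   // default when config gives 0
        int estimateRowSize = 400;          // e.g. a 400-byte mock row
        int batchRecordCount = Math.max(1, batchSize / estimateRowSize);
        batchRecordCount = Math.min(batchRecordCount, Character.MAX_VALUE);
        System.out.println(batchRecordCount);   // prints 26214
      }
    }

The 65535 cap is presumably because Drill record batches are limited to roughly 64K rows, the range addressable by a two-byte selection vector.
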
http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockGroupScanPOP.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockGroupScanPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockGroupScanPOP.java
index 2e8af42..c8082a8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockGroupScanPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockGroupScanPOP.java
@@ -18,7 +18,6 @@
 package org.apache.drill.exec.store.mock;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.regex.Matcher;
@@ -26,19 +25,21 @@ import java.util.regex.Pattern;
 
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.types.TypeProtos.DataMode;
-import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.expr.TypeHelper;
 import org.apache.drill.exec.physical.base.AbstractGroupScan;
 import org.apache.drill.exec.physical.base.GroupScan;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.ScanStats;
+import org.apache.drill.exec.physical.base.ScanStats.GroupScanProperty;
 import org.apache.drill.exec.physical.base.SubScan;
+import org.apache.drill.exec.planner.cost.DrillCostBase;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+import org.apache.drill.exec.store.mock.MockTableDef.MockColumn;
+import org.apache.drill.exec.store.mock.MockTableDef.MockScanEntry;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonInclude.Include;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import com.google.common.base.Preconditions;
@@ -75,20 +76,66 @@ public class MockGroupScanPOP extends AbstractGroupScan {
    */
 
   private boolean extended;
+  private ScanStats scanStats = ScanStats.TRIVIAL_TABLE;
 
   @JsonCreator
   public MockGroupScanPOP(@JsonProperty("url") String url,
-      @JsonProperty("extended") Boolean extended,
       @JsonProperty("entries") List<MockScanEntry> readEntries) {
     super((String) null);
     this.readEntries = readEntries;
     this.url = url;
-    this.extended = extended == null ? false : extended;
+
+    // Compute decent row-count stats for this mock data source so that
+    // the planner is "fooled" into thinking that this operator will do
+    // disk I/O.
+
+    int rowCount = 0;
+    int rowWidth = 0;
+
+    // Can have multiple "read entries" which simulate blocks or
+    // row groups.
+
+    for (MockScanEntry entry : readEntries) {
+      rowCount += entry.getRecords();
+      int groupRowWidth = 0;
+      if (entry.getTypes() == null) {
+        // If no columns, assume a row width.
+        groupRowWidth = 50;
+      } else {
+        // The normal case: we do have columns. Use them
+        // to compute the row width.
+
+        for (MockColumn col : entry.getTypes()) {
+          int colWidth = 0;
+          if (col.getWidthValue() == 0) {
+            // Fixed width columns
+            colWidth = TypeHelper.getSize(col.getMajorType());
+          } else {
+            // Variable width columns with a specified column
+            // width
+            colWidth = col.getWidthValue();
+          }
+
+          // Columns can repeat
+          colWidth *= col.getRepeatCount();
+          groupRowWidth += colWidth;
+        }
+      }
+
+      // Overall row width is the greatest group row width.
+
+      rowWidth = Math.max(rowWidth, groupRowWidth);
+    }
+    int dataSize = rowCount * rowWidth;
+    scanStats = new ScanStats(GroupScanProperty.EXACT_ROW_COUNT,
+                               rowCount,
+                               DrillCostBase.BASE_CPU_COST * dataSize,
+                               DrillCostBase.BYTE_DISK_READ_COST * dataSize);
   }
 
   @Override
   public ScanStats getScanStats() {
-    return ScanStats.TRIVIAL_TABLE;
+    return scanStats;
   }
 
   public String getUrl() {
@@ -100,162 +147,6 @@ public class MockGroupScanPOP extends AbstractGroupScan {
     return readEntries;
   }
 
-  /**
-   * Describes one simulated file (or block) within the logical file scan
-   * described by this group scan. Each block can have a distinct schema to test
-   * for schema changes.
-   */
-
-  public static class MockScanEntry {
-
-    private final int records;
-    private final MockColumn[] types;
-
-    @JsonCreator
-    public MockScanEntry(@JsonProperty("records") int records,
-        @JsonProperty("types") MockColumn[] types) {
-      this.records = records;
-      this.types = types;
-    }
-
-    public int getRecords() {
-      return records;
-    }
-
-    public MockColumn[] getTypes() {
-      return types;
-    }
-
-    @Override
-    public String toString() {
-      return "MockScanEntry [records=" + records + ", columns="
-          + Arrays.toString(types) + "]";
-    }
-  }
-
-  /**
-   * Meta-data description of the columns we wish to create during a simulated
-   * scan.
-   */
-
-  @JsonInclude(Include.NON_NULL)
-  public static class MockColumn {
-
-    /**
-     * Column type given as a Drill minor type (that is, a type without the
-     * extra information such as cardinality, width, etc.
-     */
-
-    @JsonProperty("type")
-    public MinorType minorType;
-    public String name;
-    public DataMode mode;
-    public Integer width;
-    public Integer precision;
-    public Integer scale;
-
-    /**
-     * The scan can request to use a specific data generator class. The name of
-     * that class appears here. The name can be a simple class name, if that
-     * class resides in this Java package. Or, it can be a fully qualified name
-     * of a class that resides elsewhere. If null, the default generator for the
-     * data type is used.
-     */
-
-    public String generator;
-
-    /**
-     * Some tests want to create a very wide row with many columns. This field
-     * eases that task: specify a value other than 1 and the data source will
-     * generate that many copies of the column, each with separately generated
-     * random values. For example, to create 20 copies of field, "foo", set
-     * repeat to 20 and the actual generated batches will contain fields
-     * foo1, foo2, ... foo20.
-     */
-
-    public Integer repeat;
-
-    @JsonCreator
-    public MockColumn(@JsonProperty("name") String name,
-        @JsonProperty("type") MinorType minorType,
-        @JsonProperty("mode") DataMode mode,
-        @JsonProperty("width") Integer width,
-        @JsonProperty("precision") Integer precision,
-        @JsonProperty("scale") Integer scale,
-        @JsonProperty("generator") String generator,
-        @JsonProperty("repeat") Integer repeat) {
-      this.name = name;
-      this.minorType = minorType;
-      this.mode = mode;
-      this.width = width;
-      this.precision = precision;
-      this.scale = scale;
-      this.generator = generator;
-      this.repeat = repeat;
-    }
-
-    @JsonProperty("type")
-    public MinorType getMinorType() {
-      return minorType;
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public DataMode getMode() {
-      return mode;
-    }
-
-    public Integer getWidth() {
-      return width;
-    }
-
-    public Integer getPrecision() {
-      return precision;
-    }
-
-    public Integer getScale() {
-      return scale;
-    }
-
-    public String getGenerator() {
-      return generator;
-    }
-
-    public Integer getRepeat() {
-      return repeat;
-    }
-
-    @JsonIgnore
-    public int getRepeatCount() {
-      return repeat == null ? 1 : repeat;
-    }
-
-    @JsonIgnore
-    public MajorType getMajorType() {
-      MajorType.Builder b = MajorType.newBuilder();
-      b.setMode(mode);
-      b.setMinorType(minorType);
-      if (precision != null) {
-        b.setPrecision(precision);
-      }
-      if (width != null) {
-        b.setWidth(width);
-      }
-      if (scale != null) {
-        b.setScale(scale);
-      }
-      return b.build();
-    }
-
-    @Override
-    public String toString() {
-      return "MockColumn [minorType=" + minorType + ", name=" + name + ", mode="
-          + mode + "]";
-    }
-  }
-
   @SuppressWarnings("unchecked")
   @Override
   public void applyAssignments(List<DrillbitEndpoint> endpoints) {
@@ -295,7 +186,7 @@ public class MockGroupScanPOP extends AbstractGroupScan {
   @JsonIgnore
   public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
     Preconditions.checkArgument(children.isEmpty());
-    return new MockGroupScanPOP(url, extended, readEntries);
+    return new MockGroupScanPOP(url, readEntries);
   }
 
   @Override
@@ -304,7 +195,7 @@ public class MockGroupScanPOP extends AbstractGroupScan {
       throw new IllegalArgumentException("No columns for mock scan");
     }
     List<MockColumn> mockCols = new ArrayList<>();
-    Pattern p = Pattern.compile("(\\w+)_([isd])(\\d*)");
+    Pattern p = Pattern.compile("(\\w+)_([isdb])(\\d*)");
     for (SchemaPath path : columns) {
       String col = path.getLastSegment().getNameSegment().getPath();
       if (col.equals("*")) {
@@ -334,21 +225,24 @@ public class MockGroupScanPOP extends AbstractGroupScan {
       case "d":
         minorType = MinorType.FLOAT8;
         break;
+      case "b":
+        minorType = MinorType.BIT;
+        break;
       default:
         throw new IllegalArgumentException(
             "Unsupported field type " + type + " for mock column " + col);
       }
-      MockColumn mockCol = new MockColumn(col, minorType, DataMode.REQUIRED,
-          width, 0, 0, null, 1);
+      MockTableDef.MockColumn mockCol = new MockColumn(
+          col, minorType, DataMode.REQUIRED, width, 0, 0, null, 1, null);
       mockCols.add(mockCol);
     }
     MockScanEntry entry = readEntries.get(0);
     MockColumn types[] = new MockColumn[mockCols.size()];
     mockCols.toArray(types);
-    MockScanEntry newEntry = new MockScanEntry(entry.records, types);
+    MockScanEntry newEntry = new MockScanEntry(entry.records, true, 0, 1, types);
     List<MockScanEntry> newEntries = new ArrayList<>();
     newEntries.add(newEntry);
-    return new MockGroupScanPOP(url, true, newEntries);
+    return new MockGroupScanPOP(url, newEntries);
   }
 
   @Override

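The widened pattern above, (\w+)_([isdb])(\d*), is how the mock scan decodes column names arriving from SQL: a base name, a one-letter type code (the new "b" selects the BIT/boolean generator, "d" selects FLOAT8, and by the same convention "i" and "s" presumably select integer and varchar), and an optional width. A self-contained sketch of that decoding (illustration only):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustration only: decodes mock column names such as "name_s20",
    // "age_i" or "flag_b", mirroring the pattern in the diff above.
    public class MockColumnNameDemo {
      private static final Pattern P = Pattern.compile("(\\w+)_([isdb])(\\d*)");

      public static void main(String[] args) {
        for (String col : new String[] {"name_s20", "age_i", "rate_d", "flag_b"}) {
          Matcher m = P.matcher(col);
          if (m.matches()) {
            System.out.println("field=" + m.group(1)
                + " typeCode=" + m.group(2)
                + " width=" + (m.group(3).isEmpty() ? "default" : m.group(3)));
          }
        }
      }
    }
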
http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockRecordReader.java
index 6f8cb39..2d9973e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockRecordReader.java
@@ -29,8 +29,8 @@ import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.physical.impl.OutputMutator;
 import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.store.AbstractRecordReader;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP.MockColumn;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP.MockScanEntry;
+import org.apache.drill.exec.store.mock.MockTableDef.MockColumn;
+import org.apache.drill.exec.store.mock.MockTableDef.MockScanEntry;
 import org.apache.drill.exec.vector.AllocationHelper;
 import org.apache.drill.exec.vector.ValueVector;
 

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockScanBatchCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockScanBatchCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockScanBatchCreator.java
index 9cdb7ad..9a7563a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockScanBatchCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockScanBatchCreator.java
@@ -25,7 +25,8 @@ import org.apache.drill.exec.physical.impl.BatchCreator;
 import org.apache.drill.exec.physical.impl.ScanBatch;
 import org.apache.drill.exec.record.RecordBatch;
 import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP.MockScanEntry;
+
+import org.apache.drill.exec.store.mock.MockTableDef.MockScanEntry;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -39,8 +40,8 @@ public class MockScanBatchCreator implements BatchCreator<MockSubScanPOP> {
     Preconditions.checkArgument(children.isEmpty());
     final List<MockScanEntry> entries = config.getReadEntries();
     final List<RecordReader> readers = Lists.newArrayList();
-    for(final MockScanEntry e : entries) {
-      if ( config.isExtended( ) ) {
+    for(final MockTableDef.MockScanEntry e : entries) {
+      if ( e.isExtended( ) ) {
         readers.add(new ExtendedMockRecordReader(context, e));
       } else {
         readers.add(new MockRecordReader(context, e));

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorageEngine.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorageEngine.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorageEngine.java
index df8ee50..90644b5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorageEngine.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorageEngine.java
@@ -18,6 +18,7 @@
 package org.apache.drill.exec.store.mock;
 
 import java.io.IOException;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -36,11 +37,15 @@ import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.AbstractStoragePlugin;
 import org.apache.drill.exec.store.SchemaConfig;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP.MockScanEntry;
 
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.JsonParser;
 import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Charsets;
 import com.google.common.collect.ImmutableList;
+import com.google.common.io.Resources;
 
 public class MockStorageEngine extends AbstractStoragePlugin {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MockStorageEngine.class);
@@ -57,21 +62,12 @@ public class MockStorageEngine extends AbstractStoragePlugin {
   public AbstractGroupScan getPhysicalScan(String userName, JSONOptions selection, List<SchemaPath> columns)
       throws IOException {
 
-    List<MockScanEntry> readEntries = selection.getListWith(new ObjectMapper(),
-        new TypeReference<ArrayList<MockScanEntry>>() {
+    List<MockTableDef.MockScanEntry> readEntries = selection.getListWith(new ObjectMapper(),
+        new TypeReference<ArrayList<MockTableDef.MockScanEntry>>() {
         });
 
-    // The classic (logical-plan based) and extended (SQL-based) paths
-    // come through here. If this is a SQL query, then no columns are
-    // defined in the plan.
-
     assert ! readEntries.isEmpty();
-    boolean extended = readEntries.size() == 1;
-    if (extended) {
-      MockScanEntry entry = readEntries.get(0);
-      extended = entry.getTypes() == null;
-    }
-    return new MockGroupScanPOP(null, extended, readEntries);
+    return new MockGroupScanPOP(null, readEntries);
   }
 
   @Override
@@ -89,14 +85,31 @@ public class MockStorageEngine extends AbstractStoragePlugin {
     return true;
   }
 
-//  public static class ImplicitTable extends DynamicDrillTable {
-//
-//    public ImplicitTable(StoragePlugin plugin, String storageEngineName,
-//        Object selection) {
-//      super(plugin, storageEngineName, selection);
-//    }
-//
-//  }
+  /**
+   * Resolves table names within the mock data source. Tables can be of two forms:
+   * <p>
+   * <tt>&lt;name&gt;_&lt;n&gt;&lt;unit&gt;</tt>
+   * <p>
+   * Where the "name" can be anything, "n" is the number of rows, and "unit" is
+   * the units for the row count: none, K (thousand) or M (million).
+   * <p>
+   * The above form generates a table directly with no other information needed.
+   * Column names must be provided, and must be of the form:
+   * <p>
+   * <tt>&lt;name&gt;_&lt;type&gt;&lt;size&gt;</tt>
+   * <p>
+   * Where the name can be anything, the type must be i (integer), d (double),
+   * b (boolean) or s (string, AKA VarChar). The length is needed only for
+   * string fields.
+   * <p>
+   * Direct tables are quick, but limited. The other option is to provide the
+   * name of a definition file:
+   * <p>
+   * <tt>&lt;fileName&gt;.json</tt>
+   * <p>
+   * In this case, the JSON file must be a resource visible on the class path.
+   * Omit the leading slash in the resource path name.
+   */
 
   private static class MockSchema extends AbstractSchema {
 
@@ -109,6 +122,36 @@ public class MockStorageEngine extends AbstractStoragePlugin {
 
     @Override
     public Table getTable(String name) {
+      if (name.toLowerCase().endsWith(".json")) {
+        return getConfigFile(name);
+      } else {
+        return getDirectTable(name);
+      }
+    }
+
+    private Table getConfigFile(String name) {
+      final URL url = Resources.getResource(name);
+      if (url == null) {
+        throw new IllegalArgumentException(
+            "Unable to find mock table config file " + name);
+      }
+      MockTableDef mockTableDefn;
+      try {
+        String json = Resources.toString(url, Charsets.UTF_8);
+        final ObjectMapper mapper = new ObjectMapper();
+        mapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
+        mockTableDefn = mapper.readValue(json, MockTableDef.class);
+      } catch (JsonParseException e) {
+        throw new IllegalArgumentException("Unable to parse mock table definition file: " + name, e);
+      } catch (JsonMappingException e) {
+        throw new IllegalArgumentException("Unable to Jackson deserialize mock table definition file: " + name, e);
+      } catch (IOException e) {
+        throw new IllegalArgumentException("Unable to read mock table definition file: " + name, e);
+      }
+      return new DynamicDrillTable(engine, this.name, mockTableDefn.getEntries());
+    }
+
+    private Table getDirectTable(String name) {
       Pattern p = Pattern.compile("(\\w+)_(\\d+)(k|m)?", Pattern.CASE_INSENSITIVE);
       Matcher m = p.matcher(name);
       if (! m.matches()) {
@@ -118,10 +161,11 @@ public class MockStorageEngine extends AbstractStoragePlugin {
       String baseName = m.group(1);
       int n = Integer.parseInt(m.group(2));
       String unit = m.group(3);
-      if (unit.equalsIgnoreCase("K")) { n *= 1000; }
+      if (unit == null) { }
+      else if (unit.equalsIgnoreCase("K")) { n *= 1000; }
       else if (unit.equalsIgnoreCase("M")) { n *= 1_000_000; }
-      MockScanEntry entry = new MockScanEntry(n, null);
-      List<MockScanEntry> list = new ArrayList<>();
+      MockTableDef.MockScanEntry entry = new MockTableDef.MockScanEntry(n, true, 0, 1, null);
+      List<MockTableDef.MockScanEntry> list = new ArrayList<>();
       list.add(entry);
       return new DynamicDrillTable(engine, this.name, list);
     }

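As a quick illustration of the naming rules in the javadoc above, a "direct"
mock table can be queried without any definition file. The following is a
minimal sketch, assuming the test framework's BaseTestQuery.test() helper;
the table and column names are hypothetical, chosen only to exercise the
<name>_<n><unit> and <name>_<type><size> forms (including the new "b"
boolean suffix added in this commit):

    import org.apache.drill.BaseTestQuery;
    import org.junit.Test;

    public class ExampleDirectMockTable extends BaseTestQuery {
      @Test
      public void directTable() throws Exception {
        // 600K rows; an int, a double, a 20-byte VarChar and a boolean column.
        test("SELECT id_i, rating_d, name_s20, active_b FROM `mock`.`employees_600K`");
      }
    }
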
http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
index f169f51..8e474ca 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.store.mock;
 
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
@@ -25,13 +26,13 @@ import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
 import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
+import org.apache.drill.exec.store.mock.MockTableDef.MockScanEntry;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterators;
 
 /**
  * Describes a physical scan operation for the mock data source. Each operator
@@ -44,7 +45,7 @@ public class MockSubScanPOP extends AbstractBase implements SubScan {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MockGroupScanPOP.class);
 
   private final String url;
-  protected final List<MockGroupScanPOP.MockScanEntry> readEntries;
+  protected final List<MockScanEntry> readEntries;
   private final boolean extended;
 
   /**
@@ -68,7 +69,7 @@ public class MockSubScanPOP extends AbstractBase implements SubScan {
   @JsonCreator
   public MockSubScanPOP(@JsonProperty("url") String url,
                         @JsonProperty("extended") Boolean extended,
-                        @JsonProperty("entries") List<MockGroupScanPOP.MockScanEntry> readEntries) {
+                        @JsonProperty("entries") List<MockScanEntry> readEntries) {
     this.readEntries = readEntries;
 //    OperatorCost cost = new OperatorCost(0,0,0,0);
 //    Size size = new Size(0,0);
@@ -86,13 +87,13 @@ public class MockSubScanPOP extends AbstractBase implements SubScan {
   public boolean isExtended() { return extended; }
 
   @JsonProperty("entries")
-  public List<MockGroupScanPOP.MockScanEntry> getReadEntries() {
+  public List<MockScanEntry> getReadEntries() {
     return readEntries;
   }
 
   @Override
   public Iterator<PhysicalOperator> iterator() {
-    return Iterators.emptyIterator();
+    return Collections.emptyIterator();
   }
 
   // will want to replace these two methods with an interface above for AbstractSubScan

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockTableDef.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockTableDef.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockTableDef.java
new file mode 100644
index 0000000..81f92b1
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockTableDef.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.mock;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+
+/**
+ * Structure of a mock table definition file. Yes, using Jackson deserialization to parse
+ * the file is brittle, but this is for testing so we're favoring convenience
+ * over robustness.
+ */
+
+@JsonTypeName("mock-table")
+public class MockTableDef {
+  /**
+   * Describes one simulated file (or block) within the logical file scan
+   * described by this group scan. Each block can have a distinct schema to test
+   * for schema changes.
+   */
+
+  public static class MockScanEntry {
+
+    final int records;
+    final boolean extended;
+    final int batchSize;
+    final int repeat;
+    private final MockColumn[] types;
+
+    @JsonCreator
+    public MockScanEntry(@JsonProperty("records") int records,
+                         @JsonProperty("extended") Boolean extended,
+                         @JsonProperty("batchSize") Integer batchSize,
+                         @JsonProperty("repeat") Integer repeat,
+                         @JsonProperty("types") MockTableDef.MockColumn[] types) {
+      this.records = records;
+      this.types = types;
+      this.extended = (extended == null) ? false : extended;
+      this.batchSize = (batchSize == null) ? 0 : batchSize;
+      this.repeat = (repeat == null) ? 1 : repeat;
+    }
+
+    public int getRecords() { return records; }
+    public boolean isExtended() { return extended; }
+    public int getBatchSize() { return batchSize; }
+    public int getRepeat() { return repeat; }
+
+    public MockTableDef.MockColumn[] getTypes() {
+      return types;
+    }
+
+    @Override
+    public String toString() {
+      return "MockScanEntry [records=" + records + ", columns="
+          + Arrays.toString(types) + "]";
+    }
+  }
+
+  /**
+   * Meta-data description of the columns we wish to create during a simulated
+   * scan.
+   */
+
+  @JsonInclude(Include.NON_NULL)
+  public static class MockColumn {
+
+    /**
+     * Column type given as a Drill minor type (that is, a type without the
+     * extra information such as cardinality, width, etc.).
+     */
+
+    @JsonProperty("type")
+    public MinorType minorType;
+    public String name;
+    public DataMode mode;
+    public Integer width;
+    public Integer precision;
+    public Integer scale;
+
+    /**
+     * The scan can request to use a specific data generator class. The name of
+     * that class appears here. The name can be a simple class name, if that
+     * class resides in this Java package. Or, it can be a fully qualified name
+     * of a class that resides elsewhere. If null, the default generator for the
+     * data type is used.
+     */
+
+    public String generator;
+
+    /**
+     * Some tests want to create a very wide row with many columns. This field
+     * eases that task: specify a value other than 1 and the data source will
+     * generate that many copies of the column, each with separately generated
+     * random values. For example, to create 20 copies of field, "foo", set
+     * repeat to 20 and the actual generated batches will contain fields
+     * foo1, foo2, ... foo20.
+     */
+
+    public Integer repeat;
+    public Map<String,Object> properties;
+
+    @JsonCreator
+    public MockColumn(@JsonProperty("name") String name,
+                      @JsonProperty("type") MinorType minorType,
+                      @JsonProperty("mode") DataMode mode,
+                      @JsonProperty("width") Integer width,
+                      @JsonProperty("precision") Integer precision,
+                      @JsonProperty("scale") Integer scale,
+                      @JsonProperty("generator") String generator,
+                      @JsonProperty("repeat") Integer repeat,
+                      @JsonProperty("properties") Map<String,Object> properties) {
+      this.name = name;
+      this.minorType = minorType;
+      this.mode = mode;
+      this.width = width;
+      this.precision = precision;
+      this.scale = scale;
+      this.generator = generator;
+      this.repeat = repeat;
+      this.properties = properties;
+    }
+
+    @JsonProperty("type")
+    public MinorType getMinorType() { return minorType; }
+    public String getName() { return name; }
+    public DataMode getMode() { return mode; }
+    public Integer getWidth() { return width; }
+    public Integer getPrecision() { return precision; }
+    public Integer getScale() { return scale; }
+    public String getGenerator() { return generator; }
+    public Integer getRepeat() { return repeat; }
+    @JsonIgnore
+    public int getRepeatCount() { return repeat == null ? 1 : repeat; }
+    @JsonIgnore
+    public int getWidthValue() { return width == null ? 0 : width; }
+    public Map<String,Object> getProperties() { return properties; }
+
+    @JsonIgnore
+    public MajorType getMajorType() {
+      MajorType.Builder b = MajorType.newBuilder();
+      b.setMode(mode);
+      b.setMinorType(minorType);
+      if (precision != null) {
+        b.setPrecision(precision);
+      }
+      if (width != null) {
+        b.setWidth(width);
+      }
+      if (scale != null) {
+        b.setScale(scale);
+      }
+      return b.build();
+    }
+
+    @Override
+    public String toString() {
+      return "MockColumn [minorType=" + minorType + ", name=" + name + ", mode="
+          + mode + "]";
+    }
+  }
+
+  private String descrip;
+  List<MockTableDef.MockScanEntry> entries;
+
+  public MockTableDef(@JsonProperty("descrip") final String descrip,
+                      @JsonProperty("entries") final List<MockTableDef.MockScanEntry> entries) {
+    this.descrip = descrip;
+    this.entries = entries;
+  }
+
+  /**
+   * Description of this data source. Ignored by the scanner, purely
+   * for the convenience of the author.
+   */
+
+  public String getDescrip() { return descrip; }
+
+  /**
+   * The set of entries that define the groups within the file. Each
+   * group can have a distinct schema; each may be read in a separate
+   * fragment.
+   * @return the list of scan entries that make up this mock table
+   */
+
+  public List<MockTableDef.MockScanEntry> getEntries() { return entries; }
+}

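For reference, a definition file such as the test/example-mock.json resource
added later in this commit can be loaded the same way
MockStorageEngine.getConfigFile() does it above. A minimal standalone sketch
(error handling omitted; the printed toString() output is illustrative):

    import org.apache.drill.exec.store.mock.MockTableDef;

    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.google.common.base.Charsets;
    import com.google.common.io.Resources;

    public class LoadMockTableDef {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Definition files may use unquoted field names, so enable that parser feature.
        mapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
        String json = Resources.toString(
            Resources.getResource("test/example-mock.json"), Charsets.UTF_8);
        MockTableDef defn = mapper.readValue(json, MockTableDef.class);
        for (MockTableDef.MockScanEntry entry : defn.getEntries()) {
          System.out.println(entry);
        }
      }
    }
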
http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/VaryingStringGen.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/VaryingStringGen.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/VaryingStringGen.java
new file mode 100644
index 0000000..bf0dec7
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/VaryingStringGen.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.mock;
+
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.VarCharVector;
+
+public class VaryingStringGen implements FieldGen {
+
+  private Random rand = new Random();
+  private int length;
+  private int span;
+  private int deltaPerSpan;
+  private int valueCount;
+
+  @Override
+  public void setup(ColumnDef colDef) {
+    length = colDef.width;
+    Map<String,Object> props = colDef.mockCol.properties;
+    span = 1000;
+    deltaPerSpan = 100;
+    if (props != null) {
+      Integer value = (Integer) props.get("span");
+      if (value != null) {
+        span = Math.max(1, value);
+      }
+      value = (Integer) props.get("delta");
+      if (value != null) {
+        deltaPerSpan = value;
+      }
+    }
+  }
+
+  public String value() {
+    if (valueCount++ >= span) {
+      valueCount = 0;
+      length = Math.max(0, length + deltaPerSpan);
+    }
+    String c = Character.toString((char) (rand.nextInt(26) + 'A'));
+    StringBuilder buf = new StringBuilder();
+    for (int i = 0;  i < length;  i++) {
+      buf.append(c);
+    }
+    return buf.toString();
+  }
+
+  @Override
+  public void setValue(ValueVector v, int index) {
+    VarCharVector vector = (VarCharVector) v;
+    vector.getMutator().setSafe(index, value().getBytes());
+  }
+}

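To plug a generator such as the one above into a table definition, a column
entry names the class and passes its knobs through the "properties" map read
by setup(). A hedged sketch of such a column entry, written here as a Java
string for illustration (the column name, width and property values are
hypothetical; "span" and "delta" are the keys VaryingStringGen actually
reads):

    // Hypothetical column definition fragment; in a real definition file this
    // would appear inside an entry's "types" array.
    String varyingColumn =
        "{name: \"comment\", type: \"VARCHAR\", mode: \"REQUIRED\", width: 10, "
      + "generator: \"VaryingStringGen\", properties: {span: 500, delta: 5}}";
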
http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/package-info.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/package-info.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/package-info.java
index e99cfc5..ad4595d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/package-info.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/package-info.java
@@ -30,19 +30,21 @@
  * </ul>
  * <h3>Classic Mode</h3>
  * Create a scan operator that looks like the following (from
- * <tt></tt>):
+ * <tt>/src/test/resources/functions/cast/two_way_implicit_cast.json</tt>,
+ * used in {@link TestReverseImplicitCast}):
  * <pre><code>
  *    graph:[
- *      {
- *        {@literal @}id:1,
- *        pop:"mock-scan",
- *        url: "http://apache.org",
- *        entries:[
- *          {records: 1000000, types: [
- *             {name: "blue", type: "INT", mode: "REQUIRED"},
- *             {name: "green", type: "INT", mode: "REQUIRED"}
- *        ]}
- *      ]
+ *        {
+ *            @id:1,
+ *            pop:"mock-scan",
+ *            url: "http://apache.org",
+ *            entries:[
+ *                {records: 1, types: [
+ *                    {name: "col1", type: "FLOAT4", mode: "REQUIRED"},
+ *                    {name: "col2", type: "FLOAT8", mode: "REQUIRED"}
+ *                ]}
+ *            ]
+ *        },
  *    }, ...
  * </code></pre>
  * Here:
@@ -60,6 +62,18 @@
  * <li>The <tt>mode</tt> is one of the supported Drill
  * {@link DataMode} names: usually <tt>OPTIONAL</tt> or <tt>REQUIRED</tt>.</li>
  * </ul>
+ * <p>
+ * Recent extensions include:
+ * <ul>
+ * <li><tt>repeat</tt> in either the "entry" or "record" elements allow
+ * repeating entries (simulating multiple blocks or row groups) and
+ * repeating fields (to easily create a dozen fields of some type).</li>
+ * <li><tt>generator</tt> in a field definition lets you specify a
+ * specific data generator (see below).</li>
+ * <li><tt>properties</tt> in a field definition lets you pass
+ * generator-specific values to the data generator (such as, say
+ * a minimum and maximum value.)</li>
+ * </ul>
  *
  * <h3>Enhanced Mode</h3>
  * Enhanced builds on the Classic mode to add additional capabilities.
@@ -67,7 +81,7 @@
  * is randomly generated over a wide range of values and can be
  * controlled by custom generator classes. When
  * in a physical plan, the <tt>records</tt> section has additional
- * attributes as described in {@link MockGroupScanPOP.MockColumn}:
+ * attributes as described in {@link MockTableDef.MockColumn}:
  * <ul>
  * <li>The <tt>generator</tt> lets you specify a class to generate the
  * sample data. Rules for the class name is that it can either contain
@@ -111,6 +125,9 @@
  * (multiply row count by one million), case insensitive.</li>
  * <li>Another field (not yet implemented) might specify the split count.</li>
  * </ul>
+ * <h3>Enhanced Mode with Definition File</h3>
+ * You can reference a mock data definition file directly from SQL as follows:
+ * <pre><code>SELECT * FROM `mock`.`your_defn_file.json`</code></pre>
  * <h3>Data Generators</h3>
  * The classic mode uses data generators built into each vector to generate
  * the sample data. These generators use a very simple black/white alternating

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/main/java/org/apache/drill/exec/util/TestUtilities.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/TestUtilities.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/TestUtilities.java
index 7215d10..5498ad4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/util/TestUtilities.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/TestUtilities.java
@@ -17,15 +17,15 @@
  */
 package org.apache.drill.exec.util;
 
-import com.google.common.io.Files;
+import java.io.File;
+
 import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.FileSystemPlugin;
 import org.apache.drill.exec.store.dfs.WorkspaceConfig;
 
-import java.io.File;
+import com.google.common.io.Files;
 
 /**
  * This class contains utility methods to speed up tests. Some of the production code currently calls this method

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/test/java/org/apache/drill/exec/fn/interp/ExpressionInterpreterTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/interp/ExpressionInterpreterTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/interp/ExpressionInterpreterTest.java
index e191d35..673bf80 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/interp/ExpressionInterpreterTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/interp/ExpressionInterpreterTest.java
@@ -42,11 +42,12 @@ import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.RecordBatch;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.RemoteServiceSet;
-import org.apache.drill.exec.store.mock.MockGroupScanPOP;
 import org.apache.drill.exec.store.mock.MockScanBatchCreator;
 import org.apache.drill.exec.store.mock.MockSubScanPOP;
+import org.apache.drill.exec.store.mock.MockTableDef;
 import org.apache.drill.exec.vector.ValueVector;
 import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
 import org.junit.Test;
 
 import com.google.common.collect.Lists;
@@ -123,9 +124,9 @@ public class ExpressionInterpreterTest  extends PopUnitTestBase {
     final String expressionStr = "now()";
     final BitControl.PlanFragment planFragment = BitControl.PlanFragment.getDefaultInstance();
     final QueryContextInformation queryContextInfo = planFragment.getContext();
-    final int                        timeZoneIndex = queryContextInfo.getTimeZone();
-    final org.joda.time.DateTimeZone timeZone = org.joda.time.DateTimeZone.forID(org.apache.drill.exec.expr.fn.impl.DateUtility.getTimeZone(timeZoneIndex));
-    final org.joda.time.DateTime     now = new org.joda.time.DateTime(queryContextInfo.getQueryStartTime(), timeZone);
+    final int timeZoneIndex = queryContextInfo.getTimeZone();
+    final DateTimeZone timeZone = DateTimeZone.forID(org.apache.drill.exec.expr.fn.impl.DateUtility.getTimeZone(timeZoneIndex));
+    final org.joda.time.DateTime now = new org.joda.time.DateTime(queryContextInfo.getQueryStartTime(), timeZone);
 
     final long queryStartDate = now.getMillis();
 
@@ -159,13 +160,13 @@ public class ExpressionInterpreterTest  extends PopUnitTestBase {
     // Create a mock scan batch as input for evaluation.
     assertEquals(colNames.length, colTypes.length);
 
-    final MockGroupScanPOP.MockColumn[] columns = new MockGroupScanPOP.MockColumn[colNames.length];
+    final MockTableDef.MockColumn[] columns = new MockTableDef.MockColumn[colNames.length];
 
     for (int i = 0; i < colNames.length; i++ ) {
-      columns[i] = new MockGroupScanPOP.MockColumn(colNames[i], colTypes[i].getMinorType(), colTypes[i].getMode(), 0, 0, 0, null, null);
+      columns[i] = new MockTableDef.MockColumn(colNames[i], colTypes[i].getMinorType(), colTypes[i].getMode(), 0, 0, 0, null, null, null);
     }
 
-    final MockGroupScanPOP.MockScanEntry entry = new MockGroupScanPOP.MockScanEntry(10, columns);
+    final MockTableDef.MockScanEntry entry = new MockTableDef.MockScanEntry(10, false, 0, 1, columns);
     final MockSubScanPOP scanPOP = new MockSubScanPOP("testTable", false, java.util.Collections.singletonList(entry));
 
     @SuppressWarnings("resource")

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
index 16dd0ab..23912eb 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
@@ -31,9 +31,8 @@ import java.util.List;
 import org.apache.drill.BaseTestQuery;
 import org.apache.drill.QueryTestUtil;
 import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.compile.ClassTransformer;
-import org.apache.drill.exec.compile.CodeCompiler;
 import org.apache.drill.exec.compile.ClassTransformer.ScalarReplacementOption;
+import org.apache.drill.exec.compile.CodeCompiler;
 import org.apache.drill.exec.expr.fn.impl.DateUtility;
 import org.apache.drill.exec.proto.UserBitShared.QueryType;
 import org.apache.drill.exec.record.RecordBatchLoader;
@@ -588,6 +587,7 @@ public class TestConvertFunctions extends BaseTestQuery {
   public void testHadooopVInt() throws Exception {
     final int _0 = 0;
     final int _9 = 9;
+    @SuppressWarnings("resource")
     final DrillBuf buffer = getAllocator().buffer(_9);
 
     long longVal = 0;
@@ -677,6 +677,7 @@ public class TestConvertFunctions extends BaseTestQuery {
     for(QueryDataBatch result : resultList) {
       if (result.getData() != null) {
         loader.load(result.getHeader().getDef(), result.getData());
+        @SuppressWarnings("resource")
         ValueVector v = loader.iterator().next().getValueVector();
         for (int j = 0; j < v.getAccessor().getValueCount(); j++) {
           if  (v instanceof VarCharVector) {

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/mergereceiver/TestMergingReceiver.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/mergereceiver/TestMergingReceiver.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/mergereceiver/TestMergingReceiver.java
index 71a5070..e4a96bd 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/mergereceiver/TestMergingReceiver.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/mergereceiver/TestMergingReceiver.java
@@ -44,6 +44,7 @@ public class TestMergingReceiver extends PopUnitTestBase {
 
   @Test
   public void twoBitTwoExchange() throws Exception {
+    @SuppressWarnings("resource")
     final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
 
     try (final Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
@@ -72,6 +73,7 @@ public class TestMergingReceiver extends PopUnitTestBase {
 
   @Test
   public void testMultipleProvidersMixedSizes() throws Exception {
+    @SuppressWarnings("resource")
     final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
 
     try (final Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
@@ -95,6 +97,7 @@ public class TestMergingReceiver extends PopUnitTestBase {
         count += batchRowCount;
         batchLoader.load(queryData.getDef(), b.getData());
         for (final VectorWrapper<?> vw : batchLoader) {
+          @SuppressWarnings("resource")
           final ValueVector vv = vw.getValueVector();
           final ValueVector.Accessor va = vv.getAccessor();
           final MaterializedField materializedField = vv.getField();
@@ -119,6 +122,7 @@ public class TestMergingReceiver extends PopUnitTestBase {
 
   @Test
   public void handleEmptyBatch() throws Exception {
+    @SuppressWarnings("resource")
     final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
 
     try (final Drillbit bit1 = new Drillbit(CONFIG, serviceSet);

http://git-wip-us.apache.org/repos/asf/drill/blob/974c6134/exec/java-exec/src/test/resources/test/example-mock.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/test/example-mock.json b/exec/java-exec/src/test/resources/test/example-mock.json
new file mode 100644
index 0000000..a0d2d73
--- /dev/null
+++ b/exec/java-exec/src/test/resources/test/example-mock.json
@@ -0,0 +1,16 @@
+{
+    descrip: "basic example",
+    entries:[
+        {records: 10, types: [
+          {name: "blue", type: "INT", mode: "REQUIRED", repeat: 2},
+          {name: "red", type: "BIGINT", mode: "REQUIRED"},
+          {name: "green", type: "INT", mode: "REQUIRED",
+           properties: { a: 10, b: "foo" }}
+        ]},
+        {records: 10, repeat: 2, types: [
+          {name: "blue", type: "INT", mode: "REQUIRED", repeat: 2},
+          {name: "red", type: "BIGINT", mode: "REQUIRED"},
+          {name: "green", type: "INT", mode: "REQUIRED"}
+        ]}
+    ]
+}

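Given the MockSchema.getTable() logic earlier in this commit, a table name
ending in ".json" is resolved as a class-path resource, so this file should
be reachable directly from SQL. A hedged sketch, assuming the
BaseTestQuery.test() helper used elsewhere in the test suite:

    // The resource lives under src/test/resources, so the table name is the
    // resource path without a leading slash.
    test("SELECT * FROM `mock`.`test/example-mock.json`");
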

[10/27] drill git commit: DRILL-5301: Add C++ client support for Server metadata API

Posted by jn...@apache.org.
DRILL-5301: Add C++ client support for Server metadata API

Add support for the Server metadata API to the C++ client if
available. If the API is not supported by the server, fall back
to the previous hard-coded values.

Update the querySubmitter example program to query the information.

close #764


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/d3238b1b
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/d3238b1b
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/d3238b1b

Branch: refs/heads/master
Commit: d3238b1b2270533285025d69b35906506212f492
Parents: d2e0f41
Author: Laurent Goujon <la...@dremio.com>
Authored: Sun Feb 26 10:23:59 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:32 2017 -0800

----------------------------------------------------------------------
 .../native/client/example/querySubmitter.cpp    |   88 +-
 contrib/native/client/readme.boost              |    2 +-
 .../client/src/clientlib/drillClientImpl.cpp    |  119 +-
 .../client/src/clientlib/drillClientImpl.hpp    |   17 +-
 .../native/client/src/clientlib/metadata.cpp    | 1680 +--
 .../native/client/src/clientlib/metadata.hpp    |  215 +-
 .../client/src/include/drill/drillClient.hpp    |   43 +-
 contrib/native/client/src/protobuf/User.pb.cc   | 3844 ++++++-
 contrib/native/client/src/protobuf/User.pb.h    | 9971 +++++++++++-------
 9 files changed, 11565 insertions(+), 4414 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/example/querySubmitter.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/example/querySubmitter.cpp b/contrib/native/client/example/querySubmitter.cpp
index 5b85a3e..5990897 100644
--- a/contrib/native/client/example/querySubmitter.cpp
+++ b/contrib/native/client/example/querySubmitter.cpp
@@ -21,6 +21,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <boost/thread.hpp>
+#include <boost/algorithm/string/join.hpp>
 #include "drill/drillc.hpp"
 
 int nOptions=15;
@@ -32,12 +33,12 @@ struct Option{
 }qsOptions[]= {
     {"plan", "Plan files separated by semicolons", false},
     {"query", "Query strings, separated by semicolons", false},
-    {"type", "Query type [physical|logical|sql]", true},
+    {"type", "Query type [physical|logical|sql|server]", true},
     {"connectStr", "Connect string", true},
     {"schema", "Default schema", false},
-    {"api", "API type [sync|async]", true},
+    {"api", "API type [sync|async|meta]", true},
     {"logLevel", "Logging level [trace|debug|info|warn|error|fatal]", false},
-    {"testCancel", "Cancel the query afterthe first record batch.", false},
+    {"testCancel", "Cancel the query after the first record batch.", false},
     {"syncSend", "Send query only after previous result is received", false},
     {"hshakeTimeout", "Handshake timeout (second).", false},
     {"queryTimeout", "Query timeout (second).", false},
@@ -55,12 +56,12 @@ bool bSyncSend=false;
 
 Drill::status_t SchemaListener(void* ctx, Drill::FieldDefPtr fields, Drill::DrillClientError* err){
     if(!err){
-        printf("SCHEMA CHANGE DETECTED:\n");
+        std::cout<< "SCHEMA CHANGE DETECTED:" << std::endl;
         for(size_t i=0; i<fields->size(); i++){
             std::string name= fields->at(i)->getName();
-            printf("%s\t", name.c_str());
+            std::cout << name << "\t";
         }
-        printf("\n");
+        std::cout << std::endl;
         return Drill::QRY_SUCCESS ;
     }else{
         std::cerr<< "ERROR: " << err->msg << std::endl;
@@ -113,6 +114,7 @@ void print(const Drill::FieldMetadata* pFieldMetadata, void* buf, size_t sz){
             switch (mode) {
                 case common::DM_REQUIRED:
                     sprintf((char*)printBuffer, "%lld", *(uint64_t*)buf);
+                    break;
                 case common::DM_OPTIONAL:
                     break;
                 case common::DM_REPEATED:
@@ -123,6 +125,7 @@ void print(const Drill::FieldMetadata* pFieldMetadata, void* buf, size_t sz){
             switch (mode) {
                 case common::DM_REQUIRED:
                     memcpy(printBuffer, buf, sz);
+                    break;
                 case common::DM_OPTIONAL:
                     break;
                 case common::DM_REPEATED:
@@ -133,6 +136,7 @@ void print(const Drill::FieldMetadata* pFieldMetadata, void* buf, size_t sz){
             switch (mode) {
                 case common::DM_REQUIRED:
                     memcpy(printBuffer, buf, sz);
+                    break;
                 case common::DM_OPTIONAL:
                     break;
                 case common::DM_REPEATED:
@@ -233,6 +237,9 @@ int readQueries(const std::string& queryList, std::vector<std::string>& queries)
 }
 
 bool validate(const std::string& type, const std::string& query, const std::string& plan){
+	if (type != "sync" && type != "async") {
+		return true;
+	}
     if(query.empty() && plan.empty()){
         std::cerr<< "Either query or plan must be specified"<<std::endl;
         return false;    }
@@ -365,14 +372,77 @@ int main(int argc, char* argv[]) {
             props.setProperty(USERPROP_PASSWORD, password);
         }
 
-        props.setProperty("someRandomProperty", "someRandomValue");
-
         if(client.connect(connectStr.c_str(), &props)!=Drill::CONN_SUCCESS){
             std::cerr<< "Failed to connect with error: "<< client.getError() << " (Using:"<<connectStr<<")"<<std::endl;
             return -1;
         }
         std::cout<< "Connected!\n" << std::endl;
-        if(api=="sync"){
+        if(api=="meta") {
+        	Drill::Metadata* metadata = client.getMetadata();
+        	if (metadata) {
+        		std::cout << "Connector:" << std::endl;
+        		std::cout << "\tname:" << metadata->getConnectorName() << std::endl;
+        		std::cout << "\tversion:" << metadata->getConnectorVersion() << std::endl;
+        		std::cout << std::endl;
+        		std::cout << "Server:" << std::endl;
+        		std::cout << "\tname:" << metadata->getServerName() << std::endl;
+        		std::cout << "\tversion:" << metadata->getServerVersion() << std::endl;
+        		std::cout << std::endl;
+        		std::cout << "Metadata:" << std::endl;
+        		std::cout << "\tall tables are selectable: " << metadata->areAllTableSelectable() << std::endl;
+        		std::cout << "\tcatalog separator: " << metadata->getCatalogSeparator() << std::endl;
+        		std::cout << "\tcatalog term: " << metadata->getCatalogTerm() << std::endl;
+        		std::cout << "\tCOLLATE support: " << metadata->getCollateSupport() << std::endl;
+        		std::cout << "\tcorrelation names: " << metadata->getCorrelationNames() << std::endl;
+        		std::cout << "\tdate time functions: " << boost::algorithm::join(metadata->getDateTimeFunctions(), ", ") << std::endl;
+        		std::cout << "\tdate time literals support: " << metadata->getDateTimeLiteralsSupport() << std::endl;
+        		std::cout << "\tGROUP BY support: " << metadata->getGroupBySupport() << std::endl;
+        		std::cout << "\tidentifier case: " << metadata->getIdentifierCase() << std::endl;
+        		std::cout << "\tidentifier quote string: " << metadata->getIdentifierQuoteString() << std::endl;
+        		std::cout << "\tmax binary literal length: " << metadata->getMaxBinaryLiteralLength() << std::endl;
+        		std::cout << "\tmax catalog name length: " << metadata->getMaxCatalogNameLength() << std::endl;
+        		std::cout << "\tmax char literal length: " << metadata->getMaxCharLiteralLength() << std::endl;
+        		std::cout << "\tmax column name length: " << metadata->getMaxColumnNameLength() << std::endl;
+        		std::cout << "\tmax columns in GROUP BY: " << metadata->getMaxColumnsInGroupBy() << std::endl;
+        		std::cout << "\tmax columns in ORDER BY: " << metadata->getMaxColumnsInOrderBy() << std::endl;
+        		std::cout << "\tmax columns in SELECT: " << metadata->getMaxColumnsInSelect() << std::endl;
+        		std::cout << "\tmax cursor name length: " << metadata->getMaxCursorNameLength() << std::endl;
+        		std::cout << "\tmax logical lob size: " << metadata->getMaxLogicalLobSize() << std::endl;
+        		std::cout << "\tmax row size: " << metadata->getMaxRowSize() << std::endl;
+        		std::cout << "\tmax schema name length: " << metadata->getMaxSchemaNameLength() << std::endl;
+        		std::cout << "\tmax statement length: " << metadata->getMaxStatementLength() << std::endl;
+        		std::cout << "\tmax statements: " << metadata->getMaxStatements() << std::endl;
+        		std::cout << "\tmax table name length: " << metadata->getMaxTableNameLength() << std::endl;
+        		std::cout << "\tmax tables in SELECT: " << metadata->getMaxTablesInSelect() << std::endl;
+        		std::cout << "\tmax user name length: " << metadata->getMaxUserNameLength() << std::endl;
+        		std::cout << "\tNULL collation: " << metadata->getNullCollation() << std::endl;
+        		std::cout << "\tnumeric functions: " << boost::algorithm::join(metadata->getNumericFunctions(), ", ") << std::endl;
+        		std::cout << "\tOUTER JOIN support: " << metadata->getOuterJoinSupport() << std::endl;
+        		std::cout << "\tquoted identifier case: " << metadata->getQuotedIdentifierCase() << std::endl;
+        		std::cout << "\tSQL keywords: " << boost::algorithm::join(metadata->getSQLKeywords(), ",") << std::endl;
+        		std::cout << "\tschema term: " << metadata->getSchemaTerm() << std::endl;
+        		std::cout << "\tsearch escape string: " << metadata->getSearchEscapeString() << std::endl;
+        		std::cout << "\tspecial characters: " << metadata->getSpecialCharacters() << std::endl;
+        		std::cout << "\tstring functions: " << boost::algorithm::join(metadata->getStringFunctions(), ",") << std::endl;
+        		std::cout << "\tsub query support: " << metadata->getSubQuerySupport() << std::endl;
+        		std::cout << "\tsystem functions: " << boost::algorithm::join(metadata->getSystemFunctions(), ",") << std::endl;
+        		std::cout << "\ttable term: " << metadata->getTableTerm() << std::endl;
+        		std::cout << "\tUNION support: " << metadata->getUnionSupport() << std::endl;
+        		std::cout << "\tBLOB included in max row size: " << metadata->isBlobIncludedInMaxRowSize() << std::endl;
+        		std::cout << "\tcatalog at start: " << metadata->isCatalogAtStart() << std::endl;
+        		std::cout << "\tcolumn aliasing supported: " << metadata->isColumnAliasingSupported() << std::endl;
+        		std::cout << "\tLIKE escape clause supported: " << metadata->isLikeEscapeClauseSupported() << std::endl;
+        		std::cout << "\tNULL plus non NULL equals to NULL: " << metadata->isNullPlusNonNullNull() << std::endl;
+        		std::cout << "\tread-only: " << metadata->isReadOnly() << std::endl;
+        		std::cout << "\tSELECT FOR UPDATE supported: " << metadata->isSelectForUpdateSupported() << std::endl;
+        		std::cout << "\ttransaction supported: " << metadata->isTransactionSupported() << std::endl;
+        		std::cout << "\tunrelated columns in ORDER BY supported: " << metadata->isUnrelatedColumnsInOrderBySupported() << std::endl;
+
+        		client.freeMetadata(&metadata);
+        	} else {
+        		std::cerr << "Cannot get metadata:" << client.getError() << std::endl;
+        	}
+        } else if(api=="sync"){
             Drill::DrillClientError* err=NULL;
             Drill::status_t ret;
             int nQueries=0;

http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/readme.boost
----------------------------------------------------------------------
diff --git a/contrib/native/client/readme.boost b/contrib/native/client/readme.boost
index a6035e4..39a7bfb 100644
--- a/contrib/native/client/readme.boost
+++ b/contrib/native/client/readme.boost
@@ -34,7 +34,7 @@ $ cd $BOOST_BUILD_DIR/drill_boost_1_60_0
 # the following builds a subset of boost without icu. You may need to add more modules to include icu. 
 # bcp documentation can be found here: http://www.boost.org/doc/libs/1_60_0/tools/bcp/doc/html/index.html
 
-$ $BOOST_BUILD_DIR/boost_1_60_0/dist/bin/bcp --namespace=drill_boost --namespace-alias --boost=$BOOST_BUILD_DIR/boost_1_60_0/ shared_ptr random context chrono date_time regex system timer thread asio smart_ptr bind config build regex config assign functional multiprecision $BOOST_BUILD_DIR/drill_boost_1_60_0 
+$ $BOOST_BUILD_DIR/boost_1_60_0/dist/bin/bcp --namespace=drill_boost --namespace-alias --boost=$BOOST_BUILD_DIR/boost_1_60_0/ shared_ptr random context chrono date_time regex system timer thread asio smart_ptr bind config build regex config assign functional multiprecision algorithm $BOOST_BUILD_DIR/drill_boost_1_60_0 
 
 $ cd $BOOST_BUILD_DIR/drill_boost_1_60_0
 $ ./bootstrap.sh --prefix=$BOOST_BUILD_DIR/drill_boost_1_60_0/

http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/src/clientlib/drillClientImpl.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClientImpl.cpp b/contrib/native/client/src/clientlib/drillClientImpl.cpp
index 4486068..808595c 100644
--- a/contrib/native/client/src/clientlib/drillClientImpl.cpp
+++ b/contrib/native/client/src/clientlib/drillClientImpl.cpp
@@ -47,7 +47,7 @@
 #include "saslAuthenticatorImpl.hpp"
 
 namespace Drill{
-
+namespace { // anonymous namespace
 static std::map<exec::shared::QueryResult_QueryState, status_t> QUERYSTATE_TO_STATUS_MAP = boost::assign::map_list_of
     (exec::shared::QueryResult_QueryState_STARTING, QRY_PENDING)
     (exec::shared::QueryResult_QueryState_RUNNING, QRY_RUNNING)
@@ -60,6 +60,13 @@ static std::string debugPrintQid(const exec::shared::QueryId& qid){
     return std::string("[")+boost::lexical_cast<std::string>(qid.part1()) +std::string(":") + boost::lexical_cast<std::string>(qid.part2())+std::string("] ");
 }
 
+// Conversion helper
+struct ToRpcType: public std::unary_function<google::protobuf::int32, exec::user::RpcType> {
+	exec::user::RpcType operator() (google::protobuf::int32 i) const {
+		return static_cast<exec::user::RpcType>(i);
+	}
+};
+}
 connectionStatus_t DrillClientImpl::connect(const char* connStr, DrillUserProperties* props){
     std::string pathToDrill, protocol, hostPortStr;
     std::string host;
@@ -319,6 +326,9 @@ void DrillClientImpl::handleHandshake(ByteBuf_t _buf,
         this->m_handshakeErrorId=b2u.errorid();
         this->m_handshakeErrorMsg=b2u.errormessage();
         this->m_serverInfos = b2u.server_infos();
+        std::transform(b2u.supported_methods().begin(), b2u.supported_methods().end(),
+        		std::back_inserter(this->m_supportedMethods),
+        		ToRpcType());
         for (int i=0; i<b2u.authenticationmechanisms_size(); i++) {
             std::string mechanism = b2u.authenticationmechanisms(i);
             boost::algorithm::to_lower(mechanism);
@@ -1324,6 +1334,44 @@ status_t DrillClientImpl::processColumnsResult(AllocatedBufferPtr allocatedBuffe
     return ret;
 }
 
+status_t DrillClientImpl::processServerMetaResult(AllocatedBufferPtr allocatedBuffer, const rpc::InBoundRpcMessage& msg ){
+    DRILL_MT_LOG(DRILL_LOG(LOG_DEBUG) << "Processing GetServerMetaResp with coordination id:" << msg.m_coord_id << std::endl;)
+    status_t ret=QRY_SUCCESS;
+
+    // make sure to deallocate buffer
+    boost::shared_ptr<AllocatedBuffer> deallocationGuard(allocatedBuffer);
+    boost::lock_guard<boost::mutex> lock(m_dcMutex);
+
+    if(msg.m_coord_id==0){
+         DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "DrillClientImpl::processServerMetaResult: m_coord_id=0. Ignore and return QRY_SUCCESS." << std::endl;)
+        return QRY_SUCCESS;
+    }
+    std::map<int,DrillClientQueryHandle*>::const_iterator it=this->m_queryHandles.find(msg.m_coord_id);
+    if(it!=this->m_queryHandles.end()){
+        DrillClientServerMetaHandle* pHandle=static_cast<DrillClientServerMetaHandle*>((*it).second);
+        exec::user::GetServerMetaResp* resp = new exec::user::GetServerMetaResp();
+        DRILL_MT_LOG(DRILL_LOG(LOG_TRACE)  << "Received GetServerMetaResp result Handle " << msg.m_pbody.size() << std::endl;)
+        if (!(resp->ParseFromArray(msg.m_pbody.data(), msg.m_pbody.size()))) {
+            return handleQryError(QRY_COMM_ERROR, "Cannot decode GetServerMetaResp results", pHandle);
+        }
+        if (resp->status() != exec::user::OK) {
+            return handleQryError(QRY_FAILED, resp->error(), pHandle);
+        }
+        pHandle->notifyListener(&(resp->server_meta()), NULL);
+        DRILL_MT_LOG(DRILL_LOG(LOG_DEBUG) << "GetServerMetaResp result " << std::endl;)
+    }else{
+        return handleQryError(QRY_INTERNAL_ERROR, getMessage(ERR_QRY_INVQUERYID), NULL);
+    }
+    m_pendingRequests--;
+    DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "DrillClientImpl::processServerMetaResult: " << m_pendingRequests << " requests pending." << std::endl;)
+    if(m_pendingRequests==0){
+        // signal any waiting client that it can exit because there are no more any query results to arrive.
+        // We keep the heartbeat going though.
+        m_cv.notify_one();
+    }
+    return ret;
+}
+
 DrillClientQueryResult* DrillClientImpl::findQueryResult(const exec::shared::QueryId& qid){
     DrillClientQueryResult* pDrillClientQueryResult=NULL;
     DRILL_MT_LOG(DRILL_LOG(LOG_DEBUG) << "Searching for Query Id - " << debugPrintQid(qid) << std::endl;)
@@ -1508,6 +1556,10 @@ void DrillClientImpl::handleRead(ByteBuf_t _buf,
             processSaslChallenge(allocatedBuffer, msg);
             break;
 
+        case exec::user::SERVER_META:
+        	processServerMetaResult(allocatedBuffer, msg);
+        	break;
+
         case exec::user::ACK:
             // Cancel requests will result in an ACK sent back.
             // Consume silently
@@ -1722,8 +1774,71 @@ void DrillClientImpl::shutdownSocket(){
     DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "Socket shutdown" << std::endl;)
 }
 
+namespace { // anonymous
+
+}
+
+namespace { // anonymous
+// Helper class to wait on ServerMeta results
+struct ServerMetaContext {
+	bool m_done;
+	status_t m_status;
+	exec::user::ServerMeta m_serverMeta;
+	boost::mutex m_mutex;
+	boost::condition_variable m_cv;
+
+	static status_t listener(void* ctx, const exec::user::ServerMeta* serverMeta, DrillClientError* err) {
+		ServerMetaContext* context = static_cast<ServerMetaContext*>(ctx);
+			if (err) {
+				context->m_status = QRY_FAILURE;
+			} else {
+				context->m_status = QRY_SUCCESS;
+				context->m_serverMeta.CopyFrom(*serverMeta);
+			}
+
+			{
+				boost::lock_guard<boost::mutex> lock(context->m_mutex);
+				context->m_done = true;
+			}
+			context->m_cv.notify_one();
+			return QRY_SUCCESS;
+		}
+};
+}
+
 meta::DrillMetadata* DrillClientImpl::getMetadata() {
-    return new meta::DrillMetadata(*this);
+	DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "Getting metadata" << std::endl;)
+	if (std::find(m_supportedMethods.begin(), m_supportedMethods.end(), exec::user::GET_SERVER_META) == m_supportedMethods.end()) {
+		DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "Server metadata not supported " << m_supportedMethods.size() << ". Falling back to default." << std::endl;)
+		return new meta::DrillMetadata(*this, meta::DrillMetadata::s_defaultServerMeta);
+	}
+
+	DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "Server metadata supported." << std::endl;)
+	exec::user::GetServerMetaReq req;
+	ServerMetaContext ctx;
+	boost::function<DrillClientServerMetaHandle*(int32_t)> factory = boost::bind(
+	            boost::factory<DrillClientServerMetaHandle*>(),
+	            boost::ref(*this),
+	            _1,
+				ServerMetaContext::listener,
+	            &ctx);
+	// Getting a query handle, and make sure to free when done
+	boost::shared_ptr<DrillClientServerMetaHandle> handle = boost::shared_ptr<DrillClientServerMetaHandle>(
+					sendMsg(factory, exec::user::GET_SERVER_META, req),
+					boost::bind(&DrillClientImpl::freeQueryResources, this, _1));
+	{
+		boost::unique_lock<boost::mutex> lock(ctx.m_mutex);
+		while(!ctx.m_done) {
+			ctx.m_cv.wait(lock);
+		}
+	}
+
+	DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "Server metadata received." << std::endl;)
+	if (ctx.m_status != QRY_SUCCESS) {
+		return NULL;
+	}
+	return new meta::DrillMetadata(*this, ctx.m_serverMeta);
+
 }
 
 void DrillClientImpl::freeMetadata(meta::DrillMetadata* metadata) {

http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/src/clientlib/drillClientImpl.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClientImpl.hpp b/contrib/native/client/src/clientlib/drillClientImpl.hpp
index 262edc9..5eb850d 100644
--- a/contrib/native/client/src/clientlib/drillClientImpl.hpp
+++ b/contrib/native/client/src/clientlib/drillClientImpl.hpp
@@ -201,7 +201,7 @@ class DrillClientQueryResult: public DrillClientBaseHandle<pfnQueryResultsListen
         m_pSchemaListener(NULL) {
     };
 
-    ~DrillClientQueryResult(){
+    virtual ~DrillClientQueryResult(){
         this->clearAndDestroy();
     };
 
@@ -307,6 +307,18 @@ class DrillClientPrepareHandle: public DrillClientBaseHandle<pfnPreparedStatemen
     ::exec::user::PreparedStatementHandle m_preparedStatementHandle;
 };
 
+typedef status_t (*pfnServerMetaListener)(void* ctx, const exec::user::ServerMeta* serverMeta, DrillClientError* err);
+class DrillClientServerMetaHandle: public DrillClientBaseHandle<pfnServerMetaListener, const exec::user::ServerMeta*> {
+    public:
+	DrillClientServerMetaHandle(DrillClientImpl& client, int32_t coordId, pfnServerMetaListener listener, void* listenerCtx):
+    	DrillClientBaseHandle<pfnServerMetaListener, const exec::user::ServerMeta*>(client, coordId, "server meta", listener, listenerCtx) {
+    };
+
+    private:
+    friend class DrillClientImpl;
+
+};
+
 template<typename Listener, typename MetaType, typename MetaImpl, typename MetadataResult>
 class DrillClientMetadataResult: public DrillClientBaseHandle<Listener, const DrillCollection<MetaType>*> {
 public:
@@ -364,6 +376,7 @@ class DrillClientImpl : public DrillClientImplBase{
             m_handshakeStatus(exec::user::SUCCESS),
             m_bIsConnected(false),
             m_saslAuthenticator(NULL),
+            m_saslResultCode(SASL_OK),
             m_saslDone(false),
             m_pendingRequests(0),
             m_pError(NULL),
@@ -487,6 +500,7 @@ class DrillClientImpl : public DrillClientImplBase{
         status_t processSchemasResult(AllocatedBufferPtr allocatedBuffer, const rpc::InBoundRpcMessage& msg );
         status_t processTablesResult(AllocatedBufferPtr allocatedBuffer, const rpc::InBoundRpcMessage& msg );
         status_t processColumnsResult(AllocatedBufferPtr allocatedBuffer, const rpc::InBoundRpcMessage& msg );
+        status_t processServerMetaResult(AllocatedBufferPtr allocatedBuffer, const rpc::InBoundRpcMessage& msg );
         DrillClientQueryResult* findQueryResult(const exec::shared::QueryId& qid);
         status_t processQueryStatusResult( exec::shared::QueryResult* qr,
                 DrillClientQueryResult* pDrillClientQueryResult);
@@ -533,6 +547,7 @@ class DrillClientImpl : public DrillClientImplBase{
         std::string m_handshakeErrorId;
         std::string m_handshakeErrorMsg;
         exec::user::RpcEndpointInfos m_serverInfos;
+        std::vector<exec::user::RpcType> m_supportedMethods;
         bool m_bIsConnected;
 
         std::vector<std::string> m_serverAuthMechanisms;


[12/27] drill git commit: DRILL-5301: Server metadata API

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/CollateSupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/CollateSupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/CollateSupport.java
new file mode 100644
index 0000000..599bf86
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/CollateSupport.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum CollateSupport implements com.dyuproject.protostuff.EnumLite<CollateSupport>
+{
+    CS_UNKNOWN(0),
+    CS_GROUP_BY(1);
+    
+    public final int number;
+    
+    private CollateSupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static CollateSupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return CS_UNKNOWN;
+            case 1: return CS_GROUP_BY;
+            default: return null;
+        }
+    }
+}

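All of the enum beans added by this commit share the same EnumLite shape: each constant carries its protobuf wire number, and valueOf(int) returns null for numbers the client does not recognize. A small usage sketch against the generated CollateSupport above:

    import org.apache.drill.exec.proto.beans.CollateSupport;

    public class EnumLiteDemo {
        public static void main(String[] args) {
            // Round-trip a known wire number.
            CollateSupport cs = CollateSupport.valueOf(1);
            System.out.println(cs + " -> " + cs.getNumber());   // CS_GROUP_BY -> 1

            // Unknown numbers (e.g. sent by a newer server) come back as null,
            // so callers must null-check before use.
            CollateSupport unknown = CollateSupport.valueOf(42);
            System.out.println(unknown == null
                ? "unrecognized collate support value"
                : unknown.name());
        }
    }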
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/ConvertSupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/ConvertSupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/ConvertSupport.java
new file mode 100644
index 0000000..1c2396c
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/ConvertSupport.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+import com.dyuproject.protostuff.GraphIOUtil;
+import com.dyuproject.protostuff.Input;
+import com.dyuproject.protostuff.Message;
+import com.dyuproject.protostuff.Output;
+import com.dyuproject.protostuff.Schema;
+import com.dyuproject.protostuff.UninitializedMessageException;
+
+public final class ConvertSupport implements Externalizable, Message<ConvertSupport>, Schema<ConvertSupport>
+{
+
+    public static Schema<ConvertSupport> getSchema()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    public static ConvertSupport getDefaultInstance()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    static final ConvertSupport DEFAULT_INSTANCE = new ConvertSupport();
+
+    
+    private org.apache.drill.common.types.MinorType from;
+    private org.apache.drill.common.types.MinorType to;
+
+    public ConvertSupport()
+    {
+        
+    }
+
+    public ConvertSupport(
+        org.apache.drill.common.types.MinorType from,
+        org.apache.drill.common.types.MinorType to
+    )
+    {
+        this.from = from;
+        this.to = to;
+    }
+
+    // getters and setters
+
+    // from
+
+    public org.apache.drill.common.types.MinorType getFrom()
+    {
+        return from;
+    }
+
+    public ConvertSupport setFrom(org.apache.drill.common.types.MinorType from)
+    {
+        this.from = from;
+        return this;
+    }
+
+    // to
+
+    public org.apache.drill.common.types.MinorType getTo()
+    {
+        return to;
+    }
+
+    public ConvertSupport setTo(org.apache.drill.common.types.MinorType to)
+    {
+        this.to = to;
+        return this;
+    }
+
+    // java serialization
+
+    public void readExternal(ObjectInput in) throws IOException
+    {
+        GraphIOUtil.mergeDelimitedFrom(in, this, this);
+    }
+
+    public void writeExternal(ObjectOutput out) throws IOException
+    {
+        GraphIOUtil.writeDelimitedTo(out, this, this);
+    }
+
+    // message method
+
+    public Schema<ConvertSupport> cachedSchema()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    // schema methods
+
+    public ConvertSupport newMessage()
+    {
+        return new ConvertSupport();
+    }
+
+    public Class<ConvertSupport> typeClass()
+    {
+        return ConvertSupport.class;
+    }
+
+    public String messageName()
+    {
+        return ConvertSupport.class.getSimpleName();
+    }
+
+    public String messageFullName()
+    {
+        return ConvertSupport.class.getName();
+    }
+
+    public boolean isInitialized(ConvertSupport message)
+    {
+        return 
+            message.from != null 
+            && message.to != null;
+    }
+
+    public void mergeFrom(Input input, ConvertSupport message) throws IOException
+    {
+        for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this))
+        {
+            switch(number)
+            {
+                case 0:
+                    return;
+                case 1:
+                    message.from = org.apache.drill.common.types.MinorType.valueOf(input.readEnum());
+                    break;
+                case 2:
+                    message.to = org.apache.drill.common.types.MinorType.valueOf(input.readEnum());
+                    break;
+                default:
+                    input.handleUnknownField(number, this);
+            }   
+        }
+    }
+
+
+    public void writeTo(Output output, ConvertSupport message) throws IOException
+    {
+        if(message.from == null)
+            throw new UninitializedMessageException(message);
+        output.writeEnum(1, message.from.number, false);
+
+        if(message.to == null)
+            throw new UninitializedMessageException(message);
+        output.writeEnum(2, message.to.number, false);
+    }
+
+    public String getFieldName(int number)
+    {
+        switch(number)
+        {
+            case 1: return "from";
+            case 2: return "to";
+            default: return null;
+        }
+    }
+
+    public int getFieldNumber(String name)
+    {
+        final Integer number = __fieldMap.get(name);
+        return number == null ? 0 : number.intValue();
+    }
+
+    private static final java.util.HashMap<String,Integer> __fieldMap = new java.util.HashMap<String,Integer>();
+    static
+    {
+        __fieldMap.put("from", 1);
+        __fieldMap.put("to", 2);
+    }
+    
+}

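Unlike the enums, ConvertSupport is a message bean with two required fields; as the generated writeTo() shows, serializing with either from or to unset throws UninitializedMessageException. Since the bean implements Externalizable (delegating to GraphIOUtil), plain Java serialization round-trips it. A sketch, assuming the INT and VARCHAR constants of MinorType:

    import java.io.*;

    import org.apache.drill.common.types.MinorType;
    import org.apache.drill.exec.proto.beans.ConvertSupport;

    public class ConvertSupportDemo {
        public static void main(String[] args) throws Exception {
            // Both fields are required: leaving either unset makes
            // writeTo() throw UninitializedMessageException.
            ConvertSupport cs = new ConvertSupport()
                .setFrom(MinorType.INT)        // assumed MinorType constants
                .setTo(MinorType.VARCHAR);

            // The bean is Externalizable, so standard Java serialization
            // (backed by GraphIOUtil under the hood) round-trips it.
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
                out.writeObject(cs);
            }
            try (ObjectInputStream in =
                     new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
                ConvertSupport copy = (ConvertSupport) in.readObject();
                System.out.println(copy.getFrom() + " -> " + copy.getTo());
            }
        }
    }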
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/CorrelationNamesSupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/CorrelationNamesSupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/CorrelationNamesSupport.java
new file mode 100644
index 0000000..faf46c1
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/CorrelationNamesSupport.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum CorrelationNamesSupport implements com.dyuproject.protostuff.EnumLite<CorrelationNamesSupport>
+{
+    CN_NONE(1),
+    CN_DIFFERENT_NAMES(2),
+    CN_ANY(3);
+    
+    public final int number;
+    
+    private CorrelationNamesSupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static CorrelationNamesSupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 1: return CN_NONE;
+            case 2: return CN_DIFFERENT_NAMES;
+            case 3: return CN_ANY;
+            default: return null;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/DateTimeLiteralsSupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/DateTimeLiteralsSupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/DateTimeLiteralsSupport.java
new file mode 100644
index 0000000..a2330ed
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/DateTimeLiteralsSupport.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum DateTimeLiteralsSupport implements com.dyuproject.protostuff.EnumLite<DateTimeLiteralsSupport>
+{
+    DL_UNKNOWN(0),
+    DL_DATE(1),
+    DL_TIME(2),
+    DL_TIMESTAMP(3),
+    DL_INTERVAL_YEAR(4),
+    DL_INTERVAL_MONTH(5),
+    DL_INTERVAL_DAY(6),
+    DL_INTERVAL_HOUR(7),
+    DL_INTERVAL_MINUTE(8),
+    DL_INTERVAL_SECOND(9),
+    DL_INTERVAL_YEAR_TO_MONTH(10),
+    DL_INTERVAL_DAY_TO_HOUR(11),
+    DL_INTERVAL_DAY_TO_MINUTE(12),
+    DL_INTERVAL_DAY_TO_SECOND(13),
+    DL_INTERVAL_HOUR_TO_MINUTE(14),
+    DL_INTERVAL_HOUR_TO_SECOND(15),
+    DL_INTERVAL_MINUTE_TO_SECOND(16);
+    
+    public final int number;
+    
+    private DateTimeLiteralsSupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static DateTimeLiteralsSupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return DL_UNKNOWN;
+            case 1: return DL_DATE;
+            case 2: return DL_TIME;
+            case 3: return DL_TIMESTAMP;
+            case 4: return DL_INTERVAL_YEAR;
+            case 5: return DL_INTERVAL_MONTH;
+            case 6: return DL_INTERVAL_DAY;
+            case 7: return DL_INTERVAL_HOUR;
+            case 8: return DL_INTERVAL_MINUTE;
+            case 9: return DL_INTERVAL_SECOND;
+            case 10: return DL_INTERVAL_YEAR_TO_MONTH;
+            case 11: return DL_INTERVAL_DAY_TO_HOUR;
+            case 12: return DL_INTERVAL_DAY_TO_MINUTE;
+            case 13: return DL_INTERVAL_DAY_TO_SECOND;
+            case 14: return DL_INTERVAL_HOUR_TO_MINUTE;
+            case 15: return DL_INTERVAL_HOUR_TO_SECOND;
+            case 16: return DL_INTERVAL_MINUTE_TO_SECOND;
+            default: return null;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/GetServerMetaResp.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/GetServerMetaResp.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/GetServerMetaResp.java
new file mode 100644
index 0000000..32c84db
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/GetServerMetaResp.java
@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+import com.dyuproject.protostuff.GraphIOUtil;
+import com.dyuproject.protostuff.Input;
+import com.dyuproject.protostuff.Message;
+import com.dyuproject.protostuff.Output;
+import com.dyuproject.protostuff.Schema;
+
+public final class GetServerMetaResp implements Externalizable, Message<GetServerMetaResp>, Schema<GetServerMetaResp>
+{
+
+    public static Schema<GetServerMetaResp> getSchema()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    public static GetServerMetaResp getDefaultInstance()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    static final GetServerMetaResp DEFAULT_INSTANCE = new GetServerMetaResp();
+
+    
+    private RequestStatus status;
+    private ServerMeta serverMeta;
+    private DrillPBError error;
+
+    public GetServerMetaResp()
+    {
+        
+    }
+
+    // getters and setters
+
+    // status
+
+    public RequestStatus getStatus()
+    {
+        return status == null ? RequestStatus.UNKNOWN_STATUS : status;
+    }
+
+    public GetServerMetaResp setStatus(RequestStatus status)
+    {
+        this.status = status;
+        return this;
+    }
+
+    // serverMeta
+
+    public ServerMeta getServerMeta()
+    {
+        return serverMeta;
+    }
+
+    public GetServerMetaResp setServerMeta(ServerMeta serverMeta)
+    {
+        this.serverMeta = serverMeta;
+        return this;
+    }
+
+    // error
+
+    public DrillPBError getError()
+    {
+        return error;
+    }
+
+    public GetServerMetaResp setError(DrillPBError error)
+    {
+        this.error = error;
+        return this;
+    }
+
+    // java serialization
+
+    public void readExternal(ObjectInput in) throws IOException
+    {
+        GraphIOUtil.mergeDelimitedFrom(in, this, this);
+    }
+
+    public void writeExternal(ObjectOutput out) throws IOException
+    {
+        GraphIOUtil.writeDelimitedTo(out, this, this);
+    }
+
+    // message method
+
+    public Schema<GetServerMetaResp> cachedSchema()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    // schema methods
+
+    public GetServerMetaResp newMessage()
+    {
+        return new GetServerMetaResp();
+    }
+
+    public Class<GetServerMetaResp> typeClass()
+    {
+        return GetServerMetaResp.class;
+    }
+
+    public String messageName()
+    {
+        return GetServerMetaResp.class.getSimpleName();
+    }
+
+    public String messageFullName()
+    {
+        return GetServerMetaResp.class.getName();
+    }
+
+    public boolean isInitialized(GetServerMetaResp message)
+    {
+        return true;
+    }
+
+    public void mergeFrom(Input input, GetServerMetaResp message) throws IOException
+    {
+        for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this))
+        {
+            switch(number)
+            {
+                case 0:
+                    return;
+                case 1:
+                    message.status = RequestStatus.valueOf(input.readEnum());
+                    break;
+                case 2:
+                    message.serverMeta = input.mergeObject(message.serverMeta, ServerMeta.getSchema());
+                    break;
+
+                case 3:
+                    message.error = input.mergeObject(message.error, DrillPBError.getSchema());
+                    break;
+
+                default:
+                    input.handleUnknownField(number, this);
+            }   
+        }
+    }
+
+
+    public void writeTo(Output output, GetServerMetaResp message) throws IOException
+    {
+        if(message.status != null)
+             output.writeEnum(1, message.status.number, false);
+
+        if(message.serverMeta != null)
+             output.writeObject(2, message.serverMeta, ServerMeta.getSchema(), false);
+
+
+        if(message.error != null)
+             output.writeObject(3, message.error, DrillPBError.getSchema(), false);
+
+    }
+
+    public String getFieldName(int number)
+    {
+        switch(number)
+        {
+            case 1: return "status";
+            case 2: return "serverMeta";
+            case 3: return "error";
+            default: return null;
+        }
+    }
+
+    public int getFieldNumber(String name)
+    {
+        final Integer number = __fieldMap.get(name);
+        return number == null ? 0 : number.intValue();
+    }
+
+    private static final java.util.HashMap<String,Integer> __fieldMap = new java.util.HashMap<String,Integer>();
+    static
+    {
+        __fieldMap.put("status", 1);
+        __fieldMap.put("serverMeta", 2);
+        __fieldMap.put("error", 3);
+    }
+    
+}

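Callers of GetServerMetaResp should gate on the status field before trusting serverMeta: getStatus() never returns null (it defaults to UNKNOWN_STATUS), and the error field carries details on failure. A hedged sketch, assuming RequestStatus.OK is the success constant of the existing enum:

    import org.apache.drill.exec.proto.beans.GetServerMetaResp;
    import org.apache.drill.exec.proto.beans.RequestStatus;
    import org.apache.drill.exec.proto.beans.ServerMeta;

    public class ServerMetaRespDemo {
        // Sketch of how a caller might unpack the response; RequestStatus.OK
        // is assumed to be the success constant of the existing enum.
        static ServerMeta unpack(GetServerMetaResp resp) {
            if (resp.getStatus() != RequestStatus.OK) {
                // getStatus() never returns null: it defaults to UNKNOWN_STATUS.
                String detail = resp.getError() == null
                    ? resp.getStatus().toString()
                    : resp.getError().getMessage();
                throw new IllegalStateException("GET_SERVER_META failed: " + detail);
            }
            return resp.getServerMeta();
        }
    }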
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/GroupBySupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/GroupBySupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/GroupBySupport.java
new file mode 100644
index 0000000..3b4b79e
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/GroupBySupport.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum GroupBySupport implements com.dyuproject.protostuff.EnumLite<GroupBySupport>
+{
+    GB_NONE(1),
+    GB_SELECT_ONLY(2),
+    GB_BEYOND_SELECT(3),
+    GB_UNRELATED(4);
+    
+    public final int number;
+    
+    private GroupBySupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static GroupBySupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 1: return GB_NONE;
+            case 2: return GB_SELECT_ONLY;
+            case 3: return GB_BEYOND_SELECT;
+            case 4: return GB_UNRELATED;
+            default: return null;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/IdentifierCasing.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/IdentifierCasing.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/IdentifierCasing.java
new file mode 100644
index 0000000..d991cd6
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/IdentifierCasing.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum IdentifierCasing implements com.dyuproject.protostuff.EnumLite<IdentifierCasing>
+{
+    IC_UNKNOWN(0),
+    IC_STORES_LOWER(1),
+    IC_STORES_MIXED(2),
+    IC_STORES_UPPER(3),
+    IC_SUPPORTS_MIXED(4);
+    
+    public final int number;
+    
+    private IdentifierCasing (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static IdentifierCasing valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return IC_UNKNOWN;
+            case 1: return IC_STORES_LOWER;
+            case 2: return IC_STORES_MIXED;
+            case 3: return IC_STORES_UPPER;
+            case 4: return IC_SUPPORTS_MIXED;
+            default: return null;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/NullCollation.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/NullCollation.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/NullCollation.java
new file mode 100644
index 0000000..62a164a
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/NullCollation.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum NullCollation implements com.dyuproject.protostuff.EnumLite<NullCollation>
+{
+    NC_UNKNOWN(0),
+    NC_AT_START(1),
+    NC_AT_END(2),
+    NC_HIGH(3),
+    NC_LOW(4);
+    
+    public final int number;
+    
+    private NullCollation (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static NullCollation valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return NC_UNKNOWN;
+            case 1: return NC_AT_START;
+            case 2: return NC_AT_END;
+            case 3: return NC_HIGH;
+            case 4: return NC_LOW;
+            default: return null;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/OrderBySupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/OrderBySupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/OrderBySupport.java
new file mode 100644
index 0000000..5174d8c
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/OrderBySupport.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum OrderBySupport implements com.dyuproject.protostuff.EnumLite<OrderBySupport>
+{
+    OB_UNKNOWN(0),
+    OB_UNRELATED(1),
+    OB_EXPRESSION(2);
+    
+    public final int number;
+    
+    private OrderBySupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static OrderBySupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return OB_UNKNOWN;
+            case 1: return OB_UNRELATED;
+            case 2: return OB_EXPRESSION;
+            default: return null;
+        }
+    }
+}

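ServerMeta (added later in this commit) exposes these values as a repeated field via getOrderBySupportList(), so a capability check reduces to a null-guarded list membership test, as in this sketch:

    import java.util.List;

    import org.apache.drill.exec.proto.beans.OrderBySupport;
    import org.apache.drill.exec.proto.beans.ServerMeta;

    public class OrderBySupportCheck {
        // Sketch: the repeated orderBySupport field may be null when the
        // server sent no values, so guard before calling contains().
        static boolean supportsUnrelatedOrderBy(ServerMeta meta) {
            List<OrderBySupport> support = meta.getOrderBySupportList();
            return support != null && support.contains(OrderBySupport.OB_UNRELATED);
        }
    }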
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/OuterJoinSupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/OuterJoinSupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/OuterJoinSupport.java
new file mode 100644
index 0000000..3620416
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/OuterJoinSupport.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum OuterJoinSupport implements com.dyuproject.protostuff.EnumLite<OuterJoinSupport>
+{
+    OJ_UNKNOWN(0),
+    OJ_LEFT(1),
+    OJ_RIGHT(2),
+    OJ_FULL(3),
+    OJ_NESTED(4),
+    OJ_NOT_ORDERED(5),
+    OJ_INNER(6),
+    OJ_ALL_COMPARISON_OPS(7);
+    
+    public final int number;
+    
+    private OuterJoinSupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static OuterJoinSupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return OJ_UNKNOWN;
+            case 1: return OJ_LEFT;
+            case 2: return OJ_RIGHT;
+            case 3: return OJ_FULL;
+            case 4: return OJ_NESTED;
+            case 5: return OJ_NOT_ORDERED;
+            case 6: return OJ_INNER;
+            case 7: return OJ_ALL_COMPARISON_OPS;
+            default: return null;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcType.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcType.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcType.java
index ae74bcd..8357088 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcType.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcType.java
@@ -35,6 +35,7 @@ public enum RpcType implements com.dyuproject.protostuff.EnumLite<RpcType>
     GET_TABLES(16),
     GET_COLUMNS(17),
     CREATE_PREPARED_STATEMENT(22),
+    GET_SERVER_META(8),
     QUERY_DATA(6),
     QUERY_HANDLE(7),
     QUERY_PLAN_FRAGMENTS(13),
@@ -43,8 +44,7 @@ public enum RpcType implements com.dyuproject.protostuff.EnumLite<RpcType>
     TABLES(20),
     COLUMNS(21),
     PREPARED_STATEMENT(23),
-    REQ_META_FUNCTIONS(8),
-    RESP_FUNCTION_LIST(9),
+    SERVER_META(9),
     QUERY_RESULT(10),
     SASL_MESSAGE(24);
     
@@ -72,8 +72,8 @@ public enum RpcType implements com.dyuproject.protostuff.EnumLite<RpcType>
             case 5: return REQUEST_RESULTS;
             case 6: return QUERY_DATA;
             case 7: return QUERY_HANDLE;
-            case 8: return REQ_META_FUNCTIONS;
-            case 9: return RESP_FUNCTION_LIST;
+            case 8: return GET_SERVER_META;
+            case 9: return SERVER_META;
             case 10: return QUERY_RESULT;
             case 11: return RESUME_PAUSED_QUERY;
             case 12: return GET_QUERY_PLAN_FRAGMENTS;

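Note that wire numbers 8 and 9, formerly REQ_META_FUNCTIONS and RESP_FUNCTION_LIST, are repurposed for the new server-metadata RPCs, so valueOf() now decodes them accordingly:

    import org.apache.drill.exec.proto.beans.RpcType;

    public class RpcTypeDemo {
        public static void main(String[] args) {
            // Wire numbers 8 and 9, previously REQ_META_FUNCTIONS and
            // RESP_FUNCTION_LIST, now decode to the server-metadata RPCs.
            System.out.println(RpcType.valueOf(8));   // GET_SERVER_META
            System.out.println(RpcType.valueOf(9));   // SERVER_META
        }
    }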
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/ServerMeta.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/ServerMeta.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/ServerMeta.java
new file mode 100644
index 0000000..0a7f020
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/ServerMeta.java
@@ -0,0 +1,1319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.dyuproject.protostuff.GraphIOUtil;
+import com.dyuproject.protostuff.Input;
+import com.dyuproject.protostuff.Message;
+import com.dyuproject.protostuff.Output;
+import com.dyuproject.protostuff.Schema;
+
+public final class ServerMeta implements Externalizable, Message<ServerMeta>, Schema<ServerMeta>
+{
+
+    public static Schema<ServerMeta> getSchema()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    public static ServerMeta getDefaultInstance()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    static final ServerMeta DEFAULT_INSTANCE = new ServerMeta();
+
+    
+    private Boolean allTablesSelectable;
+    private Boolean blobIncludedInMaxRowSize;
+    private Boolean catalogAtStart;
+    private String catalogSeparator;
+    private String catalogTerm;
+    private List<CollateSupport> collateSupport;
+    private Boolean columnAliasingSupported;
+    private List<ConvertSupport> convertSupport;
+    private CorrelationNamesSupport correlationNamesSupport;
+    private List<String> dateTimeFunctions;
+    private List<DateTimeLiteralsSupport> dateTimeLiteralsSupport;
+    private GroupBySupport groupBySupport;
+    private IdentifierCasing identifierCasing;
+    private String identifierQuoteString;
+    private Boolean likeEscapeClauseSupported;
+    private int maxBinaryLiteralLength;
+    private int maxCatalogNameLength;
+    private int maxCharLiteralLength;
+    private int maxColumnNameLength;
+    private int maxColumnsInGroupBy;
+    private int maxColumnsInOrderBy;
+    private int maxColumnsInSelect;
+    private int maxCursorNameLength;
+    private int maxLogicalLobSize;
+    private int maxRowSize;
+    private int maxSchemaNameLength;
+    private int maxStatementLength;
+    private int maxStatements;
+    private int maxTableNameLength;
+    private int maxTablesInSelect;
+    private int maxUserNameLength;
+    private NullCollation nullCollation;
+    private Boolean nullPlusNonNullEqualsNull;
+    private List<String> numericFunctions;
+    private List<OrderBySupport> orderBySupport;
+    private List<OuterJoinSupport> outerJoinSupport;
+    private IdentifierCasing quotedIdentifierCasing;
+    private Boolean readOnly;
+    private String schemaTerm;
+    private String searchEscapeString;
+    private Boolean selectForUpdateSupported;
+    private String specialCharacters;
+    private List<String> sqlKeywords;
+    private List<String> stringFunctions;
+    private List<SubQuerySupport> subquerySupport;
+    private List<String> systemFunctions;
+    private String tableTerm;
+    private Boolean transactionSupported;
+    private List<UnionSupport> unionSupport;
+
+    public ServerMeta()
+    {
+        
+    }
+
+    // getters and setters
+
+    // allTablesSelectable
+
+    public Boolean getAllTablesSelectable()
+    {
+        return allTablesSelectable;
+    }
+
+    public ServerMeta setAllTablesSelectable(Boolean allTablesSelectable)
+    {
+        this.allTablesSelectable = allTablesSelectable;
+        return this;
+    }
+
+    // blobIncludedInMaxRowSize
+
+    public Boolean getBlobIncludedInMaxRowSize()
+    {
+        return blobIncludedInMaxRowSize;
+    }
+
+    public ServerMeta setBlobIncludedInMaxRowSize(Boolean blobIncludedInMaxRowSize)
+    {
+        this.blobIncludedInMaxRowSize = blobIncludedInMaxRowSize;
+        return this;
+    }
+
+    // catalogAtStart
+
+    public Boolean getCatalogAtStart()
+    {
+        return catalogAtStart;
+    }
+
+    public ServerMeta setCatalogAtStart(Boolean catalogAtStart)
+    {
+        this.catalogAtStart = catalogAtStart;
+        return this;
+    }
+
+    // catalogSeparator
+
+    public String getCatalogSeparator()
+    {
+        return catalogSeparator;
+    }
+
+    public ServerMeta setCatalogSeparator(String catalogSeparator)
+    {
+        this.catalogSeparator = catalogSeparator;
+        return this;
+    }
+
+    // catalogTerm
+
+    public String getCatalogTerm()
+    {
+        return catalogTerm;
+    }
+
+    public ServerMeta setCatalogTerm(String catalogTerm)
+    {
+        this.catalogTerm = catalogTerm;
+        return this;
+    }
+
+    // collateSupport
+
+    public List<CollateSupport> getCollateSupportList()
+    {
+        return collateSupport;
+    }
+
+    public ServerMeta setCollateSupportList(List<CollateSupport> collateSupport)
+    {
+        this.collateSupport = collateSupport;
+        return this;
+    }
+
+    // columnAliasingSupported
+
+    public Boolean getColumnAliasingSupported()
+    {
+        return columnAliasingSupported;
+    }
+
+    public ServerMeta setColumnAliasingSupported(Boolean columnAliasingSupported)
+    {
+        this.columnAliasingSupported = columnAliasingSupported;
+        return this;
+    }
+
+    // convertSupport
+
+    public List<ConvertSupport> getConvertSupportList()
+    {
+        return convertSupport;
+    }
+
+    public ServerMeta setConvertSupportList(List<ConvertSupport> convertSupport)
+    {
+        this.convertSupport = convertSupport;
+        return this;
+    }
+
+    // correlationNamesSupport
+
+    public CorrelationNamesSupport getCorrelationNamesSupport()
+    {
+        return correlationNamesSupport == null ? CorrelationNamesSupport.CN_NONE : correlationNamesSupport;
+    }
+
+    public ServerMeta setCorrelationNamesSupport(CorrelationNamesSupport correlationNamesSupport)
+    {
+        this.correlationNamesSupport = correlationNamesSupport;
+        return this;
+    }
+
+    // dateTimeFunctions
+
+    public List<String> getDateTimeFunctionsList()
+    {
+        return dateTimeFunctions;
+    }
+
+    public ServerMeta setDateTimeFunctionsList(List<String> dateTimeFunctions)
+    {
+        this.dateTimeFunctions = dateTimeFunctions;
+        return this;
+    }
+
+    // dateTimeLiteralsSupport
+
+    public List<DateTimeLiteralsSupport> getDateTimeLiteralsSupportList()
+    {
+        return dateTimeLiteralsSupport;
+    }
+
+    public ServerMeta setDateTimeLiteralsSupportList(List<DateTimeLiteralsSupport> dateTimeLiteralsSupport)
+    {
+        this.dateTimeLiteralsSupport = dateTimeLiteralsSupport;
+        return this;
+    }
+
+    // groupBySupport
+
+    public GroupBySupport getGroupBySupport()
+    {
+        return groupBySupport == null ? GroupBySupport.GB_NONE : groupBySupport;
+    }
+
+    public ServerMeta setGroupBySupport(GroupBySupport groupBySupport)
+    {
+        this.groupBySupport = groupBySupport;
+        return this;
+    }
+
+    // identifierCasing
+
+    public IdentifierCasing getIdentifierCasing()
+    {
+        return identifierCasing == null ? IdentifierCasing.IC_UNKNOWN : identifierCasing;
+    }
+
+    public ServerMeta setIdentifierCasing(IdentifierCasing identifierCasing)
+    {
+        this.identifierCasing = identifierCasing;
+        return this;
+    }
+
+    // identifierQuoteString
+
+    public String getIdentifierQuoteString()
+    {
+        return identifierQuoteString;
+    }
+
+    public ServerMeta setIdentifierQuoteString(String identifierQuoteString)
+    {
+        this.identifierQuoteString = identifierQuoteString;
+        return this;
+    }
+
+    // likeEscapeClauseSupported
+
+    public Boolean getLikeEscapeClauseSupported()
+    {
+        return likeEscapeClauseSupported;
+    }
+
+    public ServerMeta setLikeEscapeClauseSupported(Boolean likeEscapeClauseSupported)
+    {
+        this.likeEscapeClauseSupported = likeEscapeClauseSupported;
+        return this;
+    }
+
+    // maxBinaryLiteralLength
+
+    public int getMaxBinaryLiteralLength()
+    {
+        return maxBinaryLiteralLength;
+    }
+
+    public ServerMeta setMaxBinaryLiteralLength(int maxBinaryLiteralLength)
+    {
+        this.maxBinaryLiteralLength = maxBinaryLiteralLength;
+        return this;
+    }
+
+    // maxCatalogNameLength
+
+    public int getMaxCatalogNameLength()
+    {
+        return maxCatalogNameLength;
+    }
+
+    public ServerMeta setMaxCatalogNameLength(int maxCatalogNameLength)
+    {
+        this.maxCatalogNameLength = maxCatalogNameLength;
+        return this;
+    }
+
+    // maxCharLiteralLength
+
+    public int getMaxCharLiteralLength()
+    {
+        return maxCharLiteralLength;
+    }
+
+    public ServerMeta setMaxCharLiteralLength(int maxCharLiteralLength)
+    {
+        this.maxCharLiteralLength = maxCharLiteralLength;
+        return this;
+    }
+
+    // maxColumnNameLength
+
+    public int getMaxColumnNameLength()
+    {
+        return maxColumnNameLength;
+    }
+
+    public ServerMeta setMaxColumnNameLength(int maxColumnNameLength)
+    {
+        this.maxColumnNameLength = maxColumnNameLength;
+        return this;
+    }
+
+    // maxColumnsInGroupBy
+
+    public int getMaxColumnsInGroupBy()
+    {
+        return maxColumnsInGroupBy;
+    }
+
+    public ServerMeta setMaxColumnsInGroupBy(int maxColumnsInGroupBy)
+    {
+        this.maxColumnsInGroupBy = maxColumnsInGroupBy;
+        return this;
+    }
+
+    // maxColumnsInOrderBy
+
+    public int getMaxColumnsInOrderBy()
+    {
+        return maxColumnsInOrderBy;
+    }
+
+    public ServerMeta setMaxColumnsInOrderBy(int maxColumnsInOrderBy)
+    {
+        this.maxColumnsInOrderBy = maxColumnsInOrderBy;
+        return this;
+    }
+
+    // maxColumnsInSelect
+
+    public int getMaxColumnsInSelect()
+    {
+        return maxColumnsInSelect;
+    }
+
+    public ServerMeta setMaxColumnsInSelect(int maxColumnsInSelect)
+    {
+        this.maxColumnsInSelect = maxColumnsInSelect;
+        return this;
+    }
+
+    // maxCursorNameLength
+
+    public int getMaxCursorNameLength()
+    {
+        return maxCursorNameLength;
+    }
+
+    public ServerMeta setMaxCursorNameLength(int maxCursorNameLength)
+    {
+        this.maxCursorNameLength = maxCursorNameLength;
+        return this;
+    }
+
+    // maxLogicalLobSize
+
+    public int getMaxLogicalLobSize()
+    {
+        return maxLogicalLobSize;
+    }
+
+    public ServerMeta setMaxLogicalLobSize(int maxLogicalLobSize)
+    {
+        this.maxLogicalLobSize = maxLogicalLobSize;
+        return this;
+    }
+
+    // maxRowSize
+
+    public int getMaxRowSize()
+    {
+        return maxRowSize;
+    }
+
+    public ServerMeta setMaxRowSize(int maxRowSize)
+    {
+        this.maxRowSize = maxRowSize;
+        return this;
+    }
+
+    // maxSchemaNameLength
+
+    public int getMaxSchemaNameLength()
+    {
+        return maxSchemaNameLength;
+    }
+
+    public ServerMeta setMaxSchemaNameLength(int maxSchemaNameLength)
+    {
+        this.maxSchemaNameLength = maxSchemaNameLength;
+        return this;
+    }
+
+    // maxStatementLength
+
+    public int getMaxStatementLength()
+    {
+        return maxStatementLength;
+    }
+
+    public ServerMeta setMaxStatementLength(int maxStatementLength)
+    {
+        this.maxStatementLength = maxStatementLength;
+        return this;
+    }
+
+    // maxStatements
+
+    public int getMaxStatements()
+    {
+        return maxStatements;
+    }
+
+    public ServerMeta setMaxStatements(int maxStatements)
+    {
+        this.maxStatements = maxStatements;
+        return this;
+    }
+
+    // maxTableNameLength
+
+    public int getMaxTableNameLength()
+    {
+        return maxTableNameLength;
+    }
+
+    public ServerMeta setMaxTableNameLength(int maxTableNameLength)
+    {
+        this.maxTableNameLength = maxTableNameLength;
+        return this;
+    }
+
+    // maxTablesInSelect
+
+    public int getMaxTablesInSelect()
+    {
+        return maxTablesInSelect;
+    }
+
+    public ServerMeta setMaxTablesInSelect(int maxTablesInSelect)
+    {
+        this.maxTablesInSelect = maxTablesInSelect;
+        return this;
+    }
+
+    // maxUserNameLength
+
+    public int getMaxUserNameLength()
+    {
+        return maxUserNameLength;
+    }
+
+    public ServerMeta setMaxUserNameLength(int maxUserNameLength)
+    {
+        this.maxUserNameLength = maxUserNameLength;
+        return this;
+    }
+
+    // nullCollation
+
+    public NullCollation getNullCollation()
+    {
+        return nullCollation == null ? NullCollation.NC_UNKNOWN : nullCollation;
+    }
+
+    public ServerMeta setNullCollation(NullCollation nullCollation)
+    {
+        this.nullCollation = nullCollation;
+        return this;
+    }
+
+    // nullPlusNonNullEqualsNull
+
+    public Boolean getNullPlusNonNullEqualsNull()
+    {
+        return nullPlusNonNullEqualsNull;
+    }
+
+    public ServerMeta setNullPlusNonNullEqualsNull(Boolean nullPlusNonNullEqualsNull)
+    {
+        this.nullPlusNonNullEqualsNull = nullPlusNonNullEqualsNull;
+        return this;
+    }
+
+    // numericFunctions
+
+    public List<String> getNumericFunctionsList()
+    {
+        return numericFunctions;
+    }
+
+    public ServerMeta setNumericFunctionsList(List<String> numericFunctions)
+    {
+        this.numericFunctions = numericFunctions;
+        return this;
+    }
+
+    // orderBySupport
+
+    public List<OrderBySupport> getOrderBySupportList()
+    {
+        return orderBySupport;
+    }
+
+    public ServerMeta setOrderBySupportList(List<OrderBySupport> orderBySupport)
+    {
+        this.orderBySupport = orderBySupport;
+        return this;
+    }
+
+    // outerJoinSupport
+
+    public List<OuterJoinSupport> getOuterJoinSupportList()
+    {
+        return outerJoinSupport;
+    }
+
+    public ServerMeta setOuterJoinSupportList(List<OuterJoinSupport> outerJoinSupport)
+    {
+        this.outerJoinSupport = outerJoinSupport;
+        return this;
+    }
+
+    // quotedIdentifierCasing
+
+    public IdentifierCasing getQuotedIdentifierCasing()
+    {
+        return quotedIdentifierCasing == null ? IdentifierCasing.IC_UNKNOWN : quotedIdentifierCasing;
+    }
+
+    public ServerMeta setQuotedIdentifierCasing(IdentifierCasing quotedIdentifierCasing)
+    {
+        this.quotedIdentifierCasing = quotedIdentifierCasing;
+        return this;
+    }
+
+    // readOnly
+
+    public Boolean getReadOnly()
+    {
+        return readOnly;
+    }
+
+    public ServerMeta setReadOnly(Boolean readOnly)
+    {
+        this.readOnly = readOnly;
+        return this;
+    }
+
+    // schemaTerm
+
+    public String getSchemaTerm()
+    {
+        return schemaTerm;
+    }
+
+    public ServerMeta setSchemaTerm(String schemaTerm)
+    {
+        this.schemaTerm = schemaTerm;
+        return this;
+    }
+
+    // searchEscapeString
+
+    public String getSearchEscapeString()
+    {
+        return searchEscapeString;
+    }
+
+    public ServerMeta setSearchEscapeString(String searchEscapeString)
+    {
+        this.searchEscapeString = searchEscapeString;
+        return this;
+    }
+
+    // selectForUpdateSupported
+
+    public Boolean getSelectForUpdateSupported()
+    {
+        return selectForUpdateSupported;
+    }
+
+    public ServerMeta setSelectForUpdateSupported(Boolean selectForUpdateSupported)
+    {
+        this.selectForUpdateSupported = selectForUpdateSupported;
+        return this;
+    }
+
+    // specialCharacters
+
+    public String getSpecialCharacters()
+    {
+        return specialCharacters;
+    }
+
+    public ServerMeta setSpecialCharacters(String specialCharacters)
+    {
+        this.specialCharacters = specialCharacters;
+        return this;
+    }
+
+    // sqlKeywords
+
+    public List<String> getSqlKeywordsList()
+    {
+        return sqlKeywords;
+    }
+
+    public ServerMeta setSqlKeywordsList(List<String> sqlKeywords)
+    {
+        this.sqlKeywords = sqlKeywords;
+        return this;
+    }
+
+    // stringFunctions
+
+    public List<String> getStringFunctionsList()
+    {
+        return stringFunctions;
+    }
+
+    public ServerMeta setStringFunctionsList(List<String> stringFunctions)
+    {
+        this.stringFunctions = stringFunctions;
+        return this;
+    }
+
+    // subquerySupport
+
+    public List<SubQuerySupport> getSubquerySupportList()
+    {
+        return subquerySupport;
+    }
+
+    public ServerMeta setSubquerySupportList(List<SubQuerySupport> subquerySupport)
+    {
+        this.subquerySupport = subquerySupport;
+        return this;
+    }
+
+    // systemFunctions
+
+    public List<String> getSystemFunctionsList()
+    {
+        return systemFunctions;
+    }
+
+    public ServerMeta setSystemFunctionsList(List<String> systemFunctions)
+    {
+        this.systemFunctions = systemFunctions;
+        return this;
+    }
+
+    // tableTerm
+
+    public String getTableTerm()
+    {
+        return tableTerm;
+    }
+
+    public ServerMeta setTableTerm(String tableTerm)
+    {
+        this.tableTerm = tableTerm;
+        return this;
+    }
+
+    // transactionSupported
+
+    public Boolean getTransactionSupported()
+    {
+        return transactionSupported;
+    }
+
+    public ServerMeta setTransactionSupported(Boolean transactionSupported)
+    {
+        this.transactionSupported = transactionSupported;
+        return this;
+    }
+
+    // unionSupport
+
+    public List<UnionSupport> getUnionSupportList()
+    {
+        return unionSupport;
+    }
+
+    public ServerMeta setUnionSupportList(List<UnionSupport> unionSupport)
+    {
+        this.unionSupport = unionSupport;
+        return this;
+    }
+
+    // java serialization
+
+    public void readExternal(ObjectInput in) throws IOException
+    {
+        GraphIOUtil.mergeDelimitedFrom(in, this, this);
+    }
+
+    public void writeExternal(ObjectOutput out) throws IOException
+    {
+        GraphIOUtil.writeDelimitedTo(out, this, this);
+    }
+
+    // message method
+
+    public Schema<ServerMeta> cachedSchema()
+    {
+        return DEFAULT_INSTANCE;
+    }
+
+    // schema methods
+
+    public ServerMeta newMessage()
+    {
+        return new ServerMeta();
+    }
+
+    public Class<ServerMeta> typeClass()
+    {
+        return ServerMeta.class;
+    }
+
+    public String messageName()
+    {
+        return ServerMeta.class.getSimpleName();
+    }
+
+    public String messageFullName()
+    {
+        return ServerMeta.class.getName();
+    }
+
+    public boolean isInitialized(ServerMeta message)
+    {
+        return true;
+    }
+
+    public void mergeFrom(Input input, ServerMeta message) throws IOException
+    {
+        for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this))
+        {
+            switch(number)
+            {
+                case 0:
+                    return;
+                case 1:
+                    message.allTablesSelectable = input.readBool();
+                    break;
+                case 2:
+                    message.blobIncludedInMaxRowSize = input.readBool();
+                    break;
+                case 3:
+                    message.catalogAtStart = input.readBool();
+                    break;
+                case 4:
+                    message.catalogSeparator = input.readString();
+                    break;
+                case 5:
+                    message.catalogTerm = input.readString();
+                    break;
+                case 6:
+                    if(message.collateSupport == null)
+                        message.collateSupport = new ArrayList<CollateSupport>();
+                    message.collateSupport.add(CollateSupport.valueOf(input.readEnum()));
+                    break;
+                case 7:
+                    message.columnAliasingSupported = input.readBool();
+                    break;
+                case 8:
+                    if(message.convertSupport == null)
+                        message.convertSupport = new ArrayList<ConvertSupport>();
+                    message.convertSupport.add(input.mergeObject(null, ConvertSupport.getSchema()));
+                    break;
+
+                case 9:
+                    message.correlationNamesSupport = CorrelationNamesSupport.valueOf(input.readEnum());
+                    break;
+                case 10:
+                    if(message.dateTimeFunctions == null)
+                        message.dateTimeFunctions = new ArrayList<String>();
+                    message.dateTimeFunctions.add(input.readString());
+                    break;
+                case 11:
+                    if(message.dateTimeLiteralsSupport == null)
+                        message.dateTimeLiteralsSupport = new ArrayList<DateTimeLiteralsSupport>();
+                    message.dateTimeLiteralsSupport.add(DateTimeLiteralsSupport.valueOf(input.readEnum()));
+                    break;
+                case 12:
+                    message.groupBySupport = GroupBySupport.valueOf(input.readEnum());
+                    break;
+                case 13:
+                    message.identifierCasing = IdentifierCasing.valueOf(input.readEnum());
+                    break;
+                case 14:
+                    message.identifierQuoteString = input.readString();
+                    break;
+                case 15:
+                    message.likeEscapeClauseSupported = input.readBool();
+                    break;
+                case 16:
+                    message.maxBinaryLiteralLength = input.readUInt32();
+                    break;
+                case 17:
+                    message.maxCatalogNameLength = input.readUInt32();
+                    break;
+                case 18:
+                    message.maxCharLiteralLength = input.readUInt32();
+                    break;
+                case 19:
+                    message.maxColumnNameLength = input.readUInt32();
+                    break;
+                case 20:
+                    message.maxColumnsInGroupBy = input.readUInt32();
+                    break;
+                case 21:
+                    message.maxColumnsInOrderBy = input.readUInt32();
+                    break;
+                case 22:
+                    message.maxColumnsInSelect = input.readUInt32();
+                    break;
+                case 23:
+                    message.maxCursorNameLength = input.readUInt32();
+                    break;
+                case 24:
+                    message.maxLogicalLobSize = input.readUInt32();
+                    break;
+                case 25:
+                    message.maxRowSize = input.readUInt32();
+                    break;
+                case 26:
+                    message.maxSchemaNameLength = input.readUInt32();
+                    break;
+                case 27:
+                    message.maxStatementLength = input.readUInt32();
+                    break;
+                case 28:
+                    message.maxStatements = input.readUInt32();
+                    break;
+                case 29:
+                    message.maxTableNameLength = input.readUInt32();
+                    break;
+                case 30:
+                    message.maxTablesInSelect = input.readUInt32();
+                    break;
+                case 31:
+                    message.maxUserNameLength = input.readUInt32();
+                    break;
+                case 32:
+                    message.nullCollation = NullCollation.valueOf(input.readEnum());
+                    break;
+                case 33:
+                    message.nullPlusNonNullEqualsNull = input.readBool();
+                    break;
+                case 34:
+                    if(message.numericFunctions == null)
+                        message.numericFunctions = new ArrayList<String>();
+                    message.numericFunctions.add(input.readString());
+                    break;
+                case 35:
+                    if(message.orderBySupport == null)
+                        message.orderBySupport = new ArrayList<OrderBySupport>();
+                    message.orderBySupport.add(OrderBySupport.valueOf(input.readEnum()));
+                    break;
+                case 36:
+                    if(message.outerJoinSupport == null)
+                        message.outerJoinSupport = new ArrayList<OuterJoinSupport>();
+                    message.outerJoinSupport.add(OuterJoinSupport.valueOf(input.readEnum()));
+                    break;
+                case 37:
+                    message.quotedIdentifierCasing = IdentifierCasing.valueOf(input.readEnum());
+                    break;
+                case 38:
+                    message.readOnly = input.readBool();
+                    break;
+                case 39:
+                    message.schemaTerm = input.readString();
+                    break;
+                case 40:
+                    message.searchEscapeString = input.readString();
+                    break;
+                case 41:
+                    message.selectForUpdateSupported = input.readBool();
+                    break;
+                case 42:
+                    message.specialCharacters = input.readString();
+                    break;
+                case 43:
+                    if(message.sqlKeywords == null)
+                        message.sqlKeywords = new ArrayList<String>();
+                    message.sqlKeywords.add(input.readString());
+                    break;
+                case 44:
+                    if(message.stringFunctions == null)
+                        message.stringFunctions = new ArrayList<String>();
+                    message.stringFunctions.add(input.readString());
+                    break;
+                case 45:
+                    if(message.subquerySupport == null)
+                        message.subquerySupport = new ArrayList<SubQuerySupport>();
+                    message.subquerySupport.add(SubQuerySupport.valueOf(input.readEnum()));
+                    break;
+                case 46:
+                    if(message.systemFunctions == null)
+                        message.systemFunctions = new ArrayList<String>();
+                    message.systemFunctions.add(input.readString());
+                    break;
+                case 47:
+                    message.tableTerm = input.readString();
+                    break;
+                case 48:
+                    message.transactionSupported = input.readBool();
+                    break;
+                case 49:
+                    if(message.unionSupport == null)
+                        message.unionSupport = new ArrayList<UnionSupport>();
+                    message.unionSupport.add(UnionSupport.valueOf(input.readEnum()));
+                    break;
+                default:
+                    input.handleUnknownField(number, this);
+            }   
+        }
+    }
+
+
+    public void writeTo(Output output, ServerMeta message) throws IOException
+    {
+        if(message.allTablesSelectable != null)
+            output.writeBool(1, message.allTablesSelectable, false);
+
+        if(message.blobIncludedInMaxRowSize != null)
+            output.writeBool(2, message.blobIncludedInMaxRowSize, false);
+
+        if(message.catalogAtStart != null)
+            output.writeBool(3, message.catalogAtStart, false);
+
+        if(message.catalogSeparator != null)
+            output.writeString(4, message.catalogSeparator, false);
+
+        if(message.catalogTerm != null)
+            output.writeString(5, message.catalogTerm, false);
+
+        if(message.collateSupport != null)
+        {
+            for(CollateSupport collateSupport : message.collateSupport)
+            {
+                if(collateSupport != null)
+                    output.writeEnum(6, collateSupport.number, true);
+            }
+        }
+
+        if(message.columnAliasingSupported != null)
+            output.writeBool(7, message.columnAliasingSupported, false);
+
+        if(message.convertSupport != null)
+        {
+            for(ConvertSupport convertSupport : message.convertSupport)
+            {
+                if(convertSupport != null)
+                    output.writeObject(8, convertSupport, ConvertSupport.getSchema(), true);
+            }
+        }
+
+
+        if(message.correlationNamesSupport != null)
+             output.writeEnum(9, message.correlationNamesSupport.number, false);
+
+        if(message.dateTimeFunctions != null)
+        {
+            for(String dateTimeFunctions : message.dateTimeFunctions)
+            {
+                if(dateTimeFunctions != null)
+                    output.writeString(10, dateTimeFunctions, true);
+            }
+        }
+
+        if(message.dateTimeLiteralsSupport != null)
+        {
+            for(DateTimeLiteralsSupport dateTimeLiteralsSupport : message.dateTimeLiteralsSupport)
+            {
+                if(dateTimeLiteralsSupport != null)
+                    output.writeEnum(11, dateTimeLiteralsSupport.number, true);
+            }
+        }
+
+        if(message.groupBySupport != null)
+             output.writeEnum(12, message.groupBySupport.number, false);
+
+        if(message.identifierCasing != null)
+             output.writeEnum(13, message.identifierCasing.number, false);
+
+        if(message.identifierQuoteString != null)
+            output.writeString(14, message.identifierQuoteString, false);
+
+        if(message.likeEscapeClauseSupported != null)
+            output.writeBool(15, message.likeEscapeClauseSupported, false);
+
+        if(message.maxBinaryLiteralLength != 0)
+            output.writeUInt32(16, message.maxBinaryLiteralLength, false);
+
+        if(message.maxCatalogNameLength != 0)
+            output.writeUInt32(17, message.maxCatalogNameLength, false);
+
+        if(message.maxCharLiteralLength != 0)
+            output.writeUInt32(18, message.maxCharLiteralLength, false);
+
+        if(message.maxColumnNameLength != 0)
+            output.writeUInt32(19, message.maxColumnNameLength, false);
+
+        if(message.maxColumnsInGroupBy != 0)
+            output.writeUInt32(20, message.maxColumnsInGroupBy, false);
+
+        if(message.maxColumnsInOrderBy != 0)
+            output.writeUInt32(21, message.maxColumnsInOrderBy, false);
+
+        if(message.maxColumnsInSelect != 0)
+            output.writeUInt32(22, message.maxColumnsInSelect, false);
+
+        if(message.maxCursorNameLength != 0)
+            output.writeUInt32(23, message.maxCursorNameLength, false);
+
+        if(message.maxLogicalLobSize != 0)
+            output.writeUInt32(24, message.maxLogicalLobSize, false);
+
+        if(message.maxRowSize != 0)
+            output.writeUInt32(25, message.maxRowSize, false);
+
+        if(message.maxSchemaNameLength != 0)
+            output.writeUInt32(26, message.maxSchemaNameLength, false);
+
+        if(message.maxStatementLength != 0)
+            output.writeUInt32(27, message.maxStatementLength, false);
+
+        if(message.maxStatements != 0)
+            output.writeUInt32(28, message.maxStatements, false);
+
+        if(message.maxTableNameLength != 0)
+            output.writeUInt32(29, message.maxTableNameLength, false);
+
+        if(message.maxTablesInSelect != 0)
+            output.writeUInt32(30, message.maxTablesInSelect, false);
+
+        if(message.maxUserNameLength != 0)
+            output.writeUInt32(31, message.maxUserNameLength, false);
+
+        if(message.nullCollation != null)
+             output.writeEnum(32, message.nullCollation.number, false);
+
+        if(message.nullPlusNonNullEqualsNull != null)
+            output.writeBool(33, message.nullPlusNonNullEqualsNull, false);
+
+        if(message.numericFunctions != null)
+        {
+            for(String numericFunctions : message.numericFunctions)
+            {
+                if(numericFunctions != null)
+                    output.writeString(34, numericFunctions, true);
+            }
+        }
+
+        if(message.orderBySupport != null)
+        {
+            for(OrderBySupport orderBySupport : message.orderBySupport)
+            {
+                if(orderBySupport != null)
+                    output.writeEnum(35, orderBySupport.number, true);
+            }
+        }
+
+        if(message.outerJoinSupport != null)
+        {
+            for(OuterJoinSupport outerJoinSupport : message.outerJoinSupport)
+            {
+                if(outerJoinSupport != null)
+                    output.writeEnum(36, outerJoinSupport.number, true);
+            }
+        }
+
+        if(message.quotedIdentifierCasing != null)
+             output.writeEnum(37, message.quotedIdentifierCasing.number, false);
+
+        if(message.readOnly != null)
+            output.writeBool(38, message.readOnly, false);
+
+        if(message.schemaTerm != null)
+            output.writeString(39, message.schemaTerm, false);
+
+        if(message.searchEscapeString != null)
+            output.writeString(40, message.searchEscapeString, false);
+
+        if(message.selectForUpdateSupported != null)
+            output.writeBool(41, message.selectForUpdateSupported, false);
+
+        if(message.specialCharacters != null)
+            output.writeString(42, message.specialCharacters, false);
+
+        if(message.sqlKeywords != null)
+        {
+            for(String sqlKeywords : message.sqlKeywords)
+            {
+                if(sqlKeywords != null)
+                    output.writeString(43, sqlKeywords, true);
+            }
+        }
+
+        if(message.stringFunctions != null)
+        {
+            for(String stringFunctions : message.stringFunctions)
+            {
+                if(stringFunctions != null)
+                    output.writeString(44, stringFunctions, true);
+            }
+        }
+
+        if(message.subquerySupport != null)
+        {
+            for(SubQuerySupport subquerySupport : message.subquerySupport)
+            {
+                if(subquerySupport != null)
+                    output.writeEnum(45, subquerySupport.number, true);
+            }
+        }
+
+        if(message.systemFunctions != null)
+        {
+            for(String systemFunctions : message.systemFunctions)
+            {
+                if(systemFunctions != null)
+                    output.writeString(46, systemFunctions, true);
+            }
+        }
+
+        if(message.tableTerm != null)
+            output.writeString(47, message.tableTerm, false);
+
+        if(message.transactionSupported != null)
+            output.writeBool(48, message.transactionSupported, false);
+
+        if(message.unionSupport != null)
+        {
+            for(UnionSupport unionSupport : message.unionSupport)
+            {
+                if(unionSupport != null)
+                    output.writeEnum(49, unionSupport.number, true);
+            }
+        }
+    }
+
+    public String getFieldName(int number)
+    {
+        switch(number)
+        {
+            case 1: return "allTablesSelectable";
+            case 2: return "blobIncludedInMaxRowSize";
+            case 3: return "catalogAtStart";
+            case 4: return "catalogSeparator";
+            case 5: return "catalogTerm";
+            case 6: return "collateSupport";
+            case 7: return "columnAliasingSupported";
+            case 8: return "convertSupport";
+            case 9: return "correlationNamesSupport";
+            case 10: return "dateTimeFunctions";
+            case 11: return "dateTimeLiteralsSupport";
+            case 12: return "groupBySupport";
+            case 13: return "identifierCasing";
+            case 14: return "identifierQuoteString";
+            case 15: return "likeEscapeClauseSupported";
+            case 16: return "maxBinaryLiteralLength";
+            case 17: return "maxCatalogNameLength";
+            case 18: return "maxCharLiteralLength";
+            case 19: return "maxColumnNameLength";
+            case 20: return "maxColumnsInGroupBy";
+            case 21: return "maxColumnsInOrderBy";
+            case 22: return "maxColumnsInSelect";
+            case 23: return "maxCursorNameLength";
+            case 24: return "maxLogicalLobSize";
+            case 25: return "maxRowSize";
+            case 26: return "maxSchemaNameLength";
+            case 27: return "maxStatementLength";
+            case 28: return "maxStatements";
+            case 29: return "maxTableNameLength";
+            case 30: return "maxTablesInSelect";
+            case 31: return "maxUserNameLength";
+            case 32: return "nullCollation";
+            case 33: return "nullPlusNonNullEqualsNull";
+            case 34: return "numericFunctions";
+            case 35: return "orderBySupport";
+            case 36: return "outerJoinSupport";
+            case 37: return "quotedIdentifierCasing";
+            case 38: return "readOnly";
+            case 39: return "schemaTerm";
+            case 40: return "searchEscapeString";
+            case 41: return "selectForUpdateSupported";
+            case 42: return "specialCharacters";
+            case 43: return "sqlKeywords";
+            case 44: return "stringFunctions";
+            case 45: return "subquerySupport";
+            case 46: return "systemFunctions";
+            case 47: return "tableTerm";
+            case 48: return "transactionSupported";
+            case 49: return "unionSupport";
+            default: return null;
+        }
+    }
+
+    public int getFieldNumber(String name)
+    {
+        final Integer number = __fieldMap.get(name);
+        return number == null ? 0 : number.intValue();
+    }
+
+    private static final java.util.HashMap<String,Integer> __fieldMap = new java.util.HashMap<String,Integer>();
+    static
+    {
+        __fieldMap.put("allTablesSelectable", 1);
+        __fieldMap.put("blobIncludedInMaxRowSize", 2);
+        __fieldMap.put("catalogAtStart", 3);
+        __fieldMap.put("catalogSeparator", 4);
+        __fieldMap.put("catalogTerm", 5);
+        __fieldMap.put("collateSupport", 6);
+        __fieldMap.put("columnAliasingSupported", 7);
+        __fieldMap.put("convertSupport", 8);
+        __fieldMap.put("correlationNamesSupport", 9);
+        __fieldMap.put("dateTimeFunctions", 10);
+        __fieldMap.put("dateTimeLiteralsSupport", 11);
+        __fieldMap.put("groupBySupport", 12);
+        __fieldMap.put("identifierCasing", 13);
+        __fieldMap.put("identifierQuoteString", 14);
+        __fieldMap.put("likeEscapeClauseSupported", 15);
+        __fieldMap.put("maxBinaryLiteralLength", 16);
+        __fieldMap.put("maxCatalogNameLength", 17);
+        __fieldMap.put("maxCharLiteralLength", 18);
+        __fieldMap.put("maxColumnNameLength", 19);
+        __fieldMap.put("maxColumnsInGroupBy", 20);
+        __fieldMap.put("maxColumnsInOrderBy", 21);
+        __fieldMap.put("maxColumnsInSelect", 22);
+        __fieldMap.put("maxCursorNameLength", 23);
+        __fieldMap.put("maxLogicalLobSize", 24);
+        __fieldMap.put("maxRowSize", 25);
+        __fieldMap.put("maxSchemaNameLength", 26);
+        __fieldMap.put("maxStatementLength", 27);
+        __fieldMap.put("maxStatements", 28);
+        __fieldMap.put("maxTableNameLength", 29);
+        __fieldMap.put("maxTablesInSelect", 30);
+        __fieldMap.put("maxUserNameLength", 31);
+        __fieldMap.put("nullCollation", 32);
+        __fieldMap.put("nullPlusNonNullEqualsNull", 33);
+        __fieldMap.put("numericFunctions", 34);
+        __fieldMap.put("orderBySupport", 35);
+        __fieldMap.put("outerJoinSupport", 36);
+        __fieldMap.put("quotedIdentifierCasing", 37);
+        __fieldMap.put("readOnly", 38);
+        __fieldMap.put("schemaTerm", 39);
+        __fieldMap.put("searchEscapeString", 40);
+        __fieldMap.put("selectForUpdateSupported", 41);
+        __fieldMap.put("specialCharacters", 42);
+        __fieldMap.put("sqlKeywords", 43);
+        __fieldMap.put("stringFunctions", 44);
+        __fieldMap.put("subquerySupport", 45);
+        __fieldMap.put("systemFunctions", 46);
+        __fieldMap.put("tableTerm", 47);
+        __fieldMap.put("transactionSupported", 48);
+        __fieldMap.put("unionSupport", 49);
+    }
+    
+}

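For reference, a minimal sketch of round-tripping the generated ServerMeta bean
through the java-serialization hooks shown above; readExternal/writeExternal
delegate to protostuff's GraphIOUtil, and the bean serves as its own Schema
(cachedSchema() returns DEFAULT_INSTANCE). The field values and stream setup
are illustrative only:

    // Build a bean with the fluent setter shown above.
    ServerMeta meta = new ServerMeta()
        .setUnionSupportList(java.util.Arrays.asList(
            UnionSupport.U_UNION, UnionSupport.U_UNION_ALL));

    // writeExternal delegates to GraphIOUtil.writeDelimitedTo(out, this, this).
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    java.io.ObjectOutputStream out = new java.io.ObjectOutputStream(bytes);
    meta.writeExternal(out);
    out.flush();

    // readExternal delegates to GraphIOUtil.mergeDelimitedFrom(in, this, this).
    ServerMeta copy = new ServerMeta();
    copy.readExternal(new java.io.ObjectInputStream(
        new java.io.ByteArrayInputStream(bytes.toByteArray())));

The __fieldMap plumbing at the bottom backs the reflective lookups, e.g.
new ServerMeta().getFieldNumber("unionSupport") yields 49 and getFieldName(49)
yields "unionSupport".
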
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/SubQuerySupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/SubQuerySupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/SubQuerySupport.java
new file mode 100644
index 0000000..28d6ff5
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/SubQuerySupport.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum SubQuerySupport implements com.dyuproject.protostuff.EnumLite<SubQuerySupport>
+{
+    SQ_UNKNOWN(0),
+    SQ_CORRELATED(1),
+    SQ_IN_COMPARISON(2),
+    SQ_IN_EXISTS(3),
+    SQ_IN_INSERT(4),
+    SQ_IN_QUANTIFIED(5);
+    
+    public final int number;
+    
+    private SubQuerySupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static SubQuerySupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return SQ_UNKNOWN;
+            case 1: return SQ_CORRELATED;
+            case 2: return SQ_IN_COMPARISON;
+            case 3: return SQ_IN_EXISTS;
+            case 4: return SQ_IN_INSERT;
+            case 5: return SQ_IN_QUANTIFIED;
+            default: return null;
+        }
+    }
+}

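The generated EnumLite beans map protobuf wire numbers to constants through
valueOf(int) and back through getNumber(), returning null for numbers the
receiver does not recognize, so callers must null-check before use. A minimal
sketch of that contract (the number 99 is purely illustrative; UnionSupport
below follows the same pattern):

    SubQuerySupport s = SubQuerySupport.valueOf(3);        // SQ_IN_EXISTS
    int wire = s.getNumber();                              // 3, round-trips the wire value
    SubQuerySupport unknown = SubQuerySupport.valueOf(99); // null: unrecognized number
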
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/beans/UnionSupport.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/UnionSupport.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/UnionSupport.java
new file mode 100644
index 0000000..aaeec0d
--- /dev/null
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/UnionSupport.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by http://code.google.com/p/protostuff/ ... DO NOT EDIT!
+// Generated from protobuf
+
+package org.apache.drill.exec.proto.beans;
+
+public enum UnionSupport implements com.dyuproject.protostuff.EnumLite<UnionSupport>
+{
+    U_UNKNOWN(0),
+    U_UNION(1),
+    U_UNION_ALL(2);
+    
+    public final int number;
+    
+    private UnionSupport (int number)
+    {
+        this.number = number;
+    }
+    
+    public int getNumber()
+    {
+        return number;
+    }
+    
+    public static UnionSupport valueOf(int number)
+    {
+        switch(number) 
+        {
+            case 0: return U_UNKNOWN;
+            case 1: return U_UNION;
+            case 2: return U_UNION_ALL;
+            default: return null;
+        }
+    }
+}


[13/27] drill git commit: DRILL-5301: Server metadata API

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java b/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
index daa3903..aa12f96 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
@@ -123,17 +123,25 @@ public final class UserProtos {
      */
     CREATE_PREPARED_STATEMENT(12, 22),
     /**
+     * <code>GET_SERVER_META = 8;</code>
+     *
+     * <pre>
+     * user is sending a request to receive server metadata
+     * </pre>
+     */
+    GET_SERVER_META(13, 8),
+    /**
      * <code>QUERY_DATA = 6;</code>
      *
      * <pre>
      * bit to user
      * </pre>
      */
-    QUERY_DATA(13, 6),
+    QUERY_DATA(14, 6),
     /**
      * <code>QUERY_HANDLE = 7;</code>
      */
-    QUERY_HANDLE(14, 7),
+    QUERY_HANDLE(15, 7),
     /**
      * <code>QUERY_PLAN_FRAGMENTS = 13;</code>
      *
@@ -141,7 +149,7 @@ public final class UserProtos {
      * return plan fragments
      * </pre>
      */
-    QUERY_PLAN_FRAGMENTS(15, 13),
+    QUERY_PLAN_FRAGMENTS(16, 13),
     /**
      * <code>CATALOGS = 18;</code>
      *
@@ -149,7 +157,7 @@ public final class UserProtos {
      * return catalogs metadata in response to GET_CATALOGS
      * </pre>
      */
-    CATALOGS(16, 18),
+    CATALOGS(17, 18),
     /**
      * <code>SCHEMAS = 19;</code>
      *
@@ -157,7 +165,7 @@ public final class UserProtos {
      * return schema metadata in response to GET_SCHEMAS
      * </pre>
      */
-    SCHEMAS(17, 19),
+    SCHEMAS(18, 19),
     /**
      * <code>TABLES = 20;</code>
      *
@@ -165,7 +173,7 @@ public final class UserProtos {
      * return table metadata in response to GET_TABLES
      * </pre>
      */
-    TABLES(18, 20),
+    TABLES(19, 20),
     /**
      * <code>COLUMNS = 21;</code>
      *
@@ -173,7 +181,7 @@ public final class UserProtos {
      * return column metadata in response to GET_COLUMNS
      * </pre>
      */
-    COLUMNS(19, 21),
+    COLUMNS(20, 21),
     /**
      * <code>PREPARED_STATEMENT = 23;</code>
      *
@@ -181,15 +189,15 @@ public final class UserProtos {
     * return prepared statement in response to CREATE_PREPARED_STATEMENT
      * </pre>
      */
-    PREPARED_STATEMENT(20, 23),
-    /**
-     * <code>REQ_META_FUNCTIONS = 8;</code>
-     */
-    REQ_META_FUNCTIONS(21, 8),
+    PREPARED_STATEMENT(21, 23),
     /**
-     * <code>RESP_FUNCTION_LIST = 9;</code>
+     * <code>SERVER_META = 9;</code>
+     *
+     * <pre>
+     * return server info in response to GET_SERVER_META
+     * </pre>
      */
-    RESP_FUNCTION_LIST(22, 9),
+    SERVER_META(22, 9),
     /**
      * <code>QUERY_RESULT = 10;</code>
      *
@@ -297,6 +305,14 @@ public final class UserProtos {
      */
     public static final int CREATE_PREPARED_STATEMENT_VALUE = 22;
     /**
+     * <code>GET_SERVER_META = 8;</code>
+     *
+     * <pre>
+     * user is sending a request to receive server metadata
+     * </pre>
+     */
+    public static final int GET_SERVER_META_VALUE = 8;
+    /**
      * <code>QUERY_DATA = 6;</code>
      *
      * <pre>
@@ -357,13 +373,13 @@ public final class UserProtos {
      */
     public static final int PREPARED_STATEMENT_VALUE = 23;
     /**
-     * <code>REQ_META_FUNCTIONS = 8;</code>
-     */
-    public static final int REQ_META_FUNCTIONS_VALUE = 8;
-    /**
-     * <code>RESP_FUNCTION_LIST = 9;</code>
+     * <code>SERVER_META = 9;</code>
+     *
+     * <pre>
+     * return server info in response to GET_SERVER_META
+     * </pre>
      */
-    public static final int RESP_FUNCTION_LIST_VALUE = 9;
+    public static final int SERVER_META_VALUE = 9;
     /**
      * <code>QUERY_RESULT = 10;</code>
      *
@@ -399,6 +415,7 @@ public final class UserProtos {
         case 16: return GET_TABLES;
         case 17: return GET_COLUMNS;
         case 22: return CREATE_PREPARED_STATEMENT;
+        case 8: return GET_SERVER_META;
         case 6: return QUERY_DATA;
         case 7: return QUERY_HANDLE;
         case 13: return QUERY_PLAN_FRAGMENTS;
@@ -407,8 +424,7 @@ public final class UserProtos {
         case 20: return TABLES;
         case 21: return COLUMNS;
         case 23: return PREPARED_STATEMENT;
-        case 8: return REQ_META_FUNCTIONS;
-        case 9: return RESP_FUNCTION_LIST;
+        case 9: return SERVER_META;
         case 10: return QUERY_RESULT;
         case 24: return SASL_MESSAGE;
         default: return null;
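
Note the wire-number reuse in the hunks above: values 8 and 9, formerly
REQ_META_FUNCTIONS and RESP_FUNCTION_LIST, now resolve to GET_SERVER_META and
SERVER_META. Because protobuf transmits enum values as numbers, a peer still
using the retired constants on 8 or 9 will be reinterpreted under the new
semantics. A minimal sketch of the resulting mapping, assuming the enclosing
enum is UserProtos.RpcType:

    RpcType req  = RpcType.valueOf(8);  // GET_SERVER_META (was REQ_META_FUNCTIONS)
    RpcType resp = RpcType.valueOf(9);  // SERVER_META     (was RESP_FUNCTION_LIST)
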
@@ -1131,1853 +1147,1758 @@ public final class UserProtos {
     // @@protoc_insertion_point(enum_scope:exec.user.ColumnUpdatability)
   }
 
-  public interface PropertyOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required string key = 1;
-    /**
-     * <code>required string key = 1;</code>
-     */
-    boolean hasKey();
+  /**
+   * Protobuf enum {@code exec.user.CollateSupport}
+   */
+  public enum CollateSupport
+      implements com.google.protobuf.ProtocolMessageEnum {
     /**
-     * <code>required string key = 1;</code>
+     * <code>CS_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
      */
-    java.lang.String getKey();
+    CS_UNKNOWN(0, 0),
     /**
-     * <code>required string key = 1;</code>
+     * <code>CS_GROUP_BY = 1;</code>
+     *
+     * <pre>
+     * COLLATE clause can be added after each grouping column
+     * </pre>
      */
-    com.google.protobuf.ByteString
-        getKeyBytes();
+    CS_GROUP_BY(1, 1),
+    ;
 
-    // required string value = 2;
-    /**
-     * <code>required string value = 2;</code>
-     */
-    boolean hasValue();
     /**
-     * <code>required string value = 2;</code>
+     * <code>CS_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
      */
-    java.lang.String getValue();
+    public static final int CS_UNKNOWN_VALUE = 0;
     /**
-     * <code>required string value = 2;</code>
+     * <code>CS_GROUP_BY = 1;</code>
+     *
+     * <pre>
+     * COLLATE clause can be added after each grouping column
+     * </pre>
      */
-    com.google.protobuf.ByteString
-        getValueBytes();
-  }
-  /**
-   * Protobuf type {@code exec.user.Property}
-   */
-  public static final class Property extends
-      com.google.protobuf.GeneratedMessage
-      implements PropertyOrBuilder {
-    // Use Property.newBuilder() to construct.
-    private Property(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private Property(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    public static final int CS_GROUP_BY_VALUE = 1;
 
-    private static final Property defaultInstance;
-    public static Property getDefaultInstance() {
-      return defaultInstance;
-    }
 
-    public Property getDefaultInstanceForType() {
-      return defaultInstance;
+    public final int getNumber() { return value; }
+
+    public static CollateSupport valueOf(int value) {
+      switch (value) {
+        case 0: return CS_UNKNOWN;
+        case 1: return CS_GROUP_BY;
+        default: return null;
+      }
     }
 
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
+    public static com.google.protobuf.Internal.EnumLiteMap<CollateSupport>
+        internalGetValueMap() {
+      return internalValueMap;
     }
-    private Property(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              bitField0_ |= 0x00000001;
-              key_ = input.readBytes();
-              break;
-            }
-            case 18: {
-              bitField0_ |= 0x00000002;
-              value_ = input.readBytes();
-              break;
+    private static com.google.protobuf.Internal.EnumLiteMap<CollateSupport>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<CollateSupport>() {
+            public CollateSupport findValueByNumber(int number) {
+              return CollateSupport.valueOf(number);
             }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
     }
-    public static final com.google.protobuf.Descriptors.Descriptor
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
         getDescriptor() {
-      return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_Property_descriptor;
+      return org.apache.drill.exec.proto.UserProtos.getDescriptor().getEnumTypes().get(7);
     }
 
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_Property_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.drill.exec.proto.UserProtos.Property.class, org.apache.drill.exec.proto.UserProtos.Property.Builder.class);
-    }
+    private static final CollateSupport[] VALUES = values();
 
-    public static com.google.protobuf.Parser<Property> PARSER =
-        new com.google.protobuf.AbstractParser<Property>() {
-      public Property parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new Property(input, extensionRegistry);
+    public static CollateSupport valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
       }
-    };
+      return VALUES[desc.getIndex()];
+    }
 
-    @java.lang.Override
-    public com.google.protobuf.Parser<Property> getParserForType() {
-      return PARSER;
+    private final int index;
+    private final int value;
+
+    private CollateSupport(int index, int value) {
+      this.index = index;
+      this.value = value;
     }
 
-    private int bitField0_;
-    // required string key = 1;
-    public static final int KEY_FIELD_NUMBER = 1;
-    private java.lang.Object key_;
+    // @@protoc_insertion_point(enum_scope:exec.user.CollateSupport)
+  }
+
+  /**
+   * Protobuf enum {@code exec.user.CorrelationNamesSupport}
+   */
+  public enum CorrelationNamesSupport
+      implements com.google.protobuf.ProtocolMessageEnum {
     /**
-     * <code>required string key = 1;</code>
+     * <code>CN_NONE = 1;</code>
+     *
+     * <pre>
+     * Correlation names are not supported
+     * </pre>
      */
-    public boolean hasKey() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
+    CN_NONE(0, 1),
     /**
-     * <code>required string key = 1;</code>
+     * <code>CN_DIFFERENT_NAMES = 2;</code>
+     *
+     * <pre>
+     * Correlation names are supported, but names have to be different from the tables they represent
+     * </pre>
      */
-    public java.lang.String getKey() {
-      java.lang.Object ref = key_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs = 
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          key_ = s;
-        }
-        return s;
-      }
-    }
+    CN_DIFFERENT_NAMES(1, 2),
     /**
-     * <code>required string key = 1;</code>
+     * <code>CN_ANY = 3;</code>
+     *
+     * <pre>
+     * Correlation names can be any name
+     * </pre>
      */
-    public com.google.protobuf.ByteString
-        getKeyBytes() {
-      java.lang.Object ref = key_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b = 
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        key_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
-    }
+    CN_ANY(2, 3),
+    ;
 
-    // required string value = 2;
-    public static final int VALUE_FIELD_NUMBER = 2;
-    private java.lang.Object value_;
     /**
-     * <code>required string value = 2;</code>
+     * <code>CN_NONE = 1;</code>
+     *
+     * <pre>
+     * Correlation names are not supported
+     * </pre>
      */
-    public boolean hasValue() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
+    public static final int CN_NONE_VALUE = 1;
     /**
-     * <code>required string value = 2;</code>
+     * <code>CN_DIFFERENT_NAMES = 2;</code>
+     *
+     * <pre>
+     * Correlation names are supported, but names have to be different from the tables they represent
+     * </pre>
      */
-    public java.lang.String getValue() {
-      java.lang.Object ref = value_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs = 
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          value_ = s;
-        }
-        return s;
-      }
-    }
+    public static final int CN_DIFFERENT_NAMES_VALUE = 2;
     /**
-     * <code>required string value = 2;</code>
+     * <code>CN_ANY = 3;</code>
+     *
+     * <pre>
+     * Correlation names can be any name
+     * </pre>
      */
-    public com.google.protobuf.ByteString
-        getValueBytes() {
-      java.lang.Object ref = value_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b = 
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        value_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
+    public static final int CN_ANY_VALUE = 3;
+
+
+    public final int getNumber() { return value; }
+
+    public static CorrelationNamesSupport valueOf(int value) {
+      switch (value) {
+        case 1: return CN_NONE;
+        case 2: return CN_DIFFERENT_NAMES;
+        case 3: return CN_ANY;
+        default: return null;
       }
     }
 
-    private void initFields() {
-      key_ = "";
-      value_ = "";
+    public static com.google.protobuf.Internal.EnumLiteMap<CorrelationNamesSupport>
+        internalGetValueMap() {
+      return internalValueMap;
     }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
+    private static com.google.protobuf.Internal.EnumLiteMap<CorrelationNamesSupport>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<CorrelationNamesSupport>() {
+            public CorrelationNamesSupport findValueByNumber(int number) {
+              return CorrelationNamesSupport.valueOf(number);
+            }
+          };
 
-      if (!hasKey()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!hasValue()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      memoizedIsInitialized = 1;
-      return true;
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
     }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeBytes(1, getKeyBytes());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeBytes(2, getValueBytes());
-      }
-      getUnknownFields().writeTo(output);
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.drill.exec.proto.UserProtos.getDescriptor().getEnumTypes().get(8);
     }
 
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
+    private static final CorrelationNamesSupport[] VALUES = values();
 
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(1, getKeyBytes());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(2, getValueBytes());
+    public static CorrelationNamesSupport valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
       }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
+      return VALUES[desc.getIndex()];
     }
 
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
+    private final int index;
+    private final int value;
 
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.Property parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+    private CorrelationNamesSupport(int index, int value) {
+      this.index = index;
+      this.value = value;
     }
 
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.drill.exec.proto.UserProtos.Property prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
+    // @@protoc_insertion_point(enum_scope:exec.user.CorrelationNamesSupport)
+  }
 
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
+  /**
+   * Protobuf enum {@code exec.user.DateTimeLiteralsSupport}
+   */
+  public enum DateTimeLiteralsSupport
+      implements com.google.protobuf.ProtocolMessageEnum {
     /**
-     * Protobuf type {@code exec.user.Property}
+     * <code>DL_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
      */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_Property_descriptor;
-      }
+    DL_UNKNOWN(0, 0),
+    /**
+     * <code>DL_DATE = 1;</code>
+     *
+     * <pre>
+     * DATE literal is supported
+     * </pre>
+     */
+    DL_DATE(1, 1),
+    /**
+     * <code>DL_TIME = 2;</code>
+     *
+     * <pre>
+     * TIME literal is supported
+     * </pre>
+     */
+    DL_TIME(2, 2),
+    /**
+     * <code>DL_TIMESTAMP = 3;</code>
+     *
+     * <pre>
+     * TIMESTAMP literal is supported
+     * </pre>
+     */
+    DL_TIMESTAMP(3, 3),
+    /**
+     * <code>DL_INTERVAL_YEAR = 4;</code>
+     *
+     * <pre>
+     * INTERVAL YEAR literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_YEAR(4, 4),
+    /**
+     * <code>DL_INTERVAL_MONTH = 5;</code>
+     *
+     * <pre>
+     * INTERVAL MONTH literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_MONTH(5, 5),
+    /**
+     * <code>DL_INTERVAL_DAY = 6;</code>
+     *
+     * <pre>
+     * INTERVAL DAY literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_DAY(6, 6),
+    /**
+     * <code>DL_INTERVAL_HOUR = 7;</code>
+     *
+     * <pre>
+     * INTERVAL HOUR literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_HOUR(7, 7),
+    /**
+     * <code>DL_INTERVAL_MINUTE = 8;</code>
+     *
+     * <pre>
+     * INTERVAL MINUTE literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_MINUTE(8, 8),
+    /**
+     * <code>DL_INTERVAL_SECOND = 9;</code>
+     *
+     * <pre>
+     * INTERVAL SECOND literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_SECOND(9, 9),
+    /**
+     * <code>DL_INTERVAL_YEAR_TO_MONTH = 10;</code>
+     *
+     * <pre>
+     * INTERVAL YEAR TO MONTH literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_YEAR_TO_MONTH(10, 10),
+    /**
+     * <code>DL_INTERVAL_DAY_TO_HOUR = 11;</code>
+     *
+     * <pre>
+     * INTERVAL DAY TO HOUR literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_DAY_TO_HOUR(11, 11),
+    /**
+     * <code>DL_INTERVAL_DAY_TO_MINUTE = 12;</code>
+     *
+     * <pre>
+     * INTERVAL DAY TO MINUTE literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_DAY_TO_MINUTE(12, 12),
+    /**
+     * <code>DL_INTERVAL_DAY_TO_SECOND = 13;</code>
+     *
+     * <pre>
+     * INTERVAL DAY TO SECOND literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_DAY_TO_SECOND(13, 13),
+    /**
+     * <code>DL_INTERVAL_HOUR_TO_MINUTE = 14;</code>
+     *
+     * <pre>
+     * INTERVAL HOUR TO MINUTE literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_HOUR_TO_MINUTE(14, 14),
+    /**
+     * <code>DL_INTERVAL_HOUR_TO_SECOND = 15;</code>
+     *
+     * <pre>
+     * INTERVAL HOUR TO SECOND literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_HOUR_TO_SECOND(15, 15),
+    /**
+     * <code>DL_INTERVAL_MINUTE_TO_SECOND = 16;</code>
+     *
+     * <pre>
+     * INTERVAL MINUTE TO SECOND literal is supported
+     * </pre>
+     */
+    DL_INTERVAL_MINUTE_TO_SECOND(16, 16),
+    ;
 
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_Property_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.drill.exec.proto.UserProtos.Property.class, org.apache.drill.exec.proto.UserProtos.Property.Builder.class);
-      }
+    /**
+     * <code>DL_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
+     */
+    public static final int DL_UNKNOWN_VALUE = 0;
+    /**
+     * <code>DL_DATE = 1;</code>
+     *
+     * <pre>
+     * DATE literal is supported
+     * </pre>
+     */
+    public static final int DL_DATE_VALUE = 1;
+    /**
+     * <code>DL_TIME = 2;</code>
+     *
+     * <pre>
+     * TIME literal is supported
+     * </pre>
+     */
+    public static final int DL_TIME_VALUE = 2;
+    /**
+     * <code>DL_TIMESTAMP = 3;</code>
+     *
+     * <pre>
+     * TIMESTAMP literal is supported
+     * </pre>
+     */
+    public static final int DL_TIMESTAMP_VALUE = 3;
+    /**
+     * <code>DL_INTERVAL_YEAR = 4;</code>
+     *
+     * <pre>
+     * INTERVAL YEAR literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_YEAR_VALUE = 4;
+    /**
+     * <code>DL_INTERVAL_MONTH = 5;</code>
+     *
+     * <pre>
+     * INTERVAL MONTH literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_MONTH_VALUE = 5;
+    /**
+     * <code>DL_INTERVAL_DAY = 6;</code>
+     *
+     * <pre>
+     * INTERVAL DAY literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_DAY_VALUE = 6;
+    /**
+     * <code>DL_INTERVAL_HOUR = 7;</code>
+     *
+     * <pre>
+     * INTERVAL HOUR literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_HOUR_VALUE = 7;
+    /**
+     * <code>DL_INTERVAL_MINUTE = 8;</code>
+     *
+     * <pre>
+     * INTERVAL MINUTE literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_MINUTE_VALUE = 8;
+    /**
+     * <code>DL_INTERVAL_SECOND = 9;</code>
+     *
+     * <pre>
+     * INTERVAL SECOND literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_SECOND_VALUE = 9;
+    /**
+     * <code>DL_INTERVAL_YEAR_TO_MONTH = 10;</code>
+     *
+     * <pre>
+     * INTERVAL YEAR TO MONTH literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_YEAR_TO_MONTH_VALUE = 10;
+    /**
+     * <code>DL_INTERVAL_DAY_TO_HOUR = 11;</code>
+     *
+     * <pre>
+     * INTERVAL DAY TO HOUR literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_DAY_TO_HOUR_VALUE = 11;
+    /**
+     * <code>DL_INTERVAL_DAY_TO_MINUTE = 12;</code>
+     *
+     * <pre>
+     * INTERVAL DAY TO MINUTE literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_DAY_TO_MINUTE_VALUE = 12;
+    /**
+     * <code>DL_INTERVAL_DAY_TO_SECOND = 13;</code>
+     *
+     * <pre>
+     * INTERVAL DAY TO SECOND literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_DAY_TO_SECOND_VALUE = 13;
+    /**
+     * <code>DL_INTERVAL_HOUR_TO_MINUTE = 14;</code>
+     *
+     * <pre>
+     * INTERVAL HOUR TO MINUTE literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_HOUR_TO_MINUTE_VALUE = 14;
+    /**
+     * <code>DL_INTERVAL_HOUR_TO_SECOND = 15;</code>
+     *
+     * <pre>
+     * INTERVAL HOUR TO SECOND literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_HOUR_TO_SECOND_VALUE = 15;
+    /**
+     * <code>DL_INTERVAL_MINUTE_TO_SECOND = 16;</code>
+     *
+     * <pre>
+     * INTERVAL MINUTE TO SECOND literal is supported
+     * </pre>
+     */
+    public static final int DL_INTERVAL_MINUTE_TO_SECOND_VALUE = 16;
 
-      // Construct using org.apache.drill.exec.proto.UserProtos.Property.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
 
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
+    public final int getNumber() { return value; }
 
-      public Builder clear() {
-        super.clear();
-        key_ = "";
-        bitField0_ = (bitField0_ & ~0x00000001);
-        value_ = "";
-        bitField0_ = (bitField0_ & ~0x00000002);
-        return this;
+    public static DateTimeLiteralsSupport valueOf(int value) {
+      switch (value) {
+        case 0: return DL_UNKNOWN;
+        case 1: return DL_DATE;
+        case 2: return DL_TIME;
+        case 3: return DL_TIMESTAMP;
+        case 4: return DL_INTERVAL_YEAR;
+        case 5: return DL_INTERVAL_MONTH;
+        case 6: return DL_INTERVAL_DAY;
+        case 7: return DL_INTERVAL_HOUR;
+        case 8: return DL_INTERVAL_MINUTE;
+        case 9: return DL_INTERVAL_SECOND;
+        case 10: return DL_INTERVAL_YEAR_TO_MONTH;
+        case 11: return DL_INTERVAL_DAY_TO_HOUR;
+        case 12: return DL_INTERVAL_DAY_TO_MINUTE;
+        case 13: return DL_INTERVAL_DAY_TO_SECOND;
+        case 14: return DL_INTERVAL_HOUR_TO_MINUTE;
+        case 15: return DL_INTERVAL_HOUR_TO_SECOND;
+        case 16: return DL_INTERVAL_MINUTE_TO_SECOND;
+        default: return null;
       }
+    }
 
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
+    public static com.google.protobuf.Internal.EnumLiteMap<DateTimeLiteralsSupport>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<DateTimeLiteralsSupport>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<DateTimeLiteralsSupport>() {
+            public DateTimeLiteralsSupport findValueByNumber(int number) {
+              return DateTimeLiteralsSupport.valueOf(number);
+            }
+          };
 
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_Property_descriptor;
-      }
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.drill.exec.proto.UserProtos.getDescriptor().getEnumTypes().get(9);
+    }
 
-      public org.apache.drill.exec.proto.UserProtos.Property getDefaultInstanceForType() {
-        return org.apache.drill.exec.proto.UserProtos.Property.getDefaultInstance();
-      }
+    private static final DateTimeLiteralsSupport[] VALUES = values();
 
-      public org.apache.drill.exec.proto.UserProtos.Property build() {
-        org.apache.drill.exec.proto.UserProtos.Property result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
+    public static DateTimeLiteralsSupport valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
       }
+      return VALUES[desc.getIndex()];
+    }
 
-      public org.apache.drill.exec.proto.UserProtos.Property buildPartial() {
-        org.apache.drill.exec.proto.UserProtos.Property result = new org.apache.drill.exec.proto.UserProtos.Property(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        result.key_ = key_;
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        result.value_ = value_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
+    private final int index;
+    private final int value;
 
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.drill.exec.proto.UserProtos.Property) {
-          return mergeFrom((org.apache.drill.exec.proto.UserProtos.Property)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
+    private DateTimeLiteralsSupport(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
 
-      public Builder mergeFrom(org.apache.drill.exec.proto.UserProtos.Property other) {
-        if (other == org.apache.drill.exec.proto.UserProtos.Property.getDefaultInstance()) return this;
-        if (other.hasKey()) {
-          bitField0_ |= 0x00000001;
-          key_ = other.key_;
-          onChanged();
-        }
-        if (other.hasValue()) {
-          bitField0_ |= 0x00000002;
-          value_ = other.value_;
-          onChanged();
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
+    // @@protoc_insertion_point(enum_scope:exec.user.DateTimeLiteralsSupport)
+  }
 
-      public final boolean isInitialized() {
-        if (!hasKey()) {
-          
-          return false;
-        }
-        if (!hasValue()) {
-          
-          return false;
-        }
-        return true;
-      }
+  /**
+   * Protobuf enum {@code exec.user.GroupBySupport}
+   */
+  public enum GroupBySupport
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <code>GB_NONE = 1;</code>
+     *
+     * <pre>
+     * Group by is not supported
+     * </pre>
+     */
+    GB_NONE(0, 1),
+    /**
+     * <code>GB_SELECT_ONLY = 2;</code>
+     *
+     * <pre>
+     * Group by supported with non-aggregated columns in select
+     * </pre>
+     */
+    GB_SELECT_ONLY(1, 2),
+    /**
+     * <code>GB_BEYOND_SELECT = 3;</code>
+     *
+     * <pre>
+     * Group by supported with columns absent from the select list
+     * if all the non-aggregated columns from the select list are also added
+     * </pre>
+     */
+    GB_BEYOND_SELECT(2, 3),
+    /**
+     * <code>GB_UNRELATED = 4;</code>
+     *
+     * <pre>
+     * Group by supported with columns absent from the select list
+     * </pre>
+     */
+    GB_UNRELATED(3, 4),
+    ;
 
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.drill.exec.proto.UserProtos.Property parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.drill.exec.proto.UserProtos.Property) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
+    /**
+     * <code>GB_NONE = 1;</code>
+     *
+     * <pre>
+     * Group by is not supported
+     * </pre>
+     */
+    public static final int GB_NONE_VALUE = 1;
+    /**
+     * <code>GB_SELECT_ONLY = 2;</code>
+     *
+     * <pre>
+     * Group by supported with non-aggregated columns in select
+     * </pre>
+     */
+    public static final int GB_SELECT_ONLY_VALUE = 2;
+    /**
+     * <code>GB_BEYOND_SELECT = 3;</code>
+     *
+     * <pre>
+     * Group by supported with columns absent from the select list
+     * if all the non-aggregated columns from the select list are also added
+     * </pre>
+     */
+    public static final int GB_BEYOND_SELECT_VALUE = 3;
+    /**
+     * <code>GB_UNRELATED = 4;</code>
+     *
+     * <pre>
+     * Group by supported with columns absent from the select list
+     * </pre>
+     */
+    public static final int GB_UNRELATED_VALUE = 4;
 
-      // required string key = 1;
-      private java.lang.Object key_ = "";
-      /**
-       * <code>required string key = 1;</code>
-       */
-      public boolean hasKey() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>required string key = 1;</code>
-       */
-      public java.lang.String getKey() {
-        java.lang.Object ref = key_;
-        if (!(ref instanceof java.lang.String)) {
-          java.lang.String s = ((com.google.protobuf.ByteString) ref)
-              .toStringUtf8();
-          key_ = s;
-          return s;
-        } else {
-          return (java.lang.String) ref;
-        }
-      }
-      /**
-       * <code>required string key = 1;</code>
-       */
-      public com.google.protobuf.ByteString
-          getKeyBytes() {
-        java.lang.Object ref = key_;
-        if (ref instanceof String) {
-          com.google.protobuf.ByteString b = 
-              com.google.protobuf.ByteString.copyFromUtf8(
-                  (java.lang.String) ref);
-          key_ = b;
-          return b;
-        } else {
-          return (com.google.protobuf.ByteString) ref;
-        }
-      }
-      /**
-       * <code>required string key = 1;</code>
-       */
-      public Builder setKey(
-          java.lang.String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000001;
-        key_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required string key = 1;</code>
-       */
-      public Builder clearKey() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        key_ = getDefaultInstance().getKey();
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required string key = 1;</code>
-       */
-      public Builder setKeyBytes(
-          com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000001;
-        key_ = value;
-        onChanged();
-        return this;
-      }
 
-      // required string value = 2;
-      private java.lang.Object value_ = "";
-      /**
-       * <code>required string value = 2;</code>
-       */
-      public boolean hasValue() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>required string value = 2;</code>
-       */
-      public java.lang.String getValue() {
-        java.lang.Object ref = value_;
-        if (!(ref instanceof java.lang.String)) {
-          java.lang.String s = ((com.google.protobuf.ByteString) ref)
-              .toStringUtf8();
-          value_ = s;
-          return s;
-        } else {
-          return (java.lang.String) ref;
-        }
-      }
-      /**
-       * <code>required string value = 2;</code>
-       */
-      public com.google.protobuf.ByteString
-          getValueBytes() {
-        java.lang.Object ref = value_;
-        if (ref instanceof String) {
-          com.google.protobuf.ByteString b = 
-              com.google.protobuf.ByteString.copyFromUtf8(
-                  (java.lang.String) ref);
-          value_ = b;
-          return b;
-        } else {
-          return (com.google.protobuf.ByteString) ref;
-        }
-      }
-      /**
-       * <code>required string value = 2;</code>
-       */
-      public Builder setValue(
-          java.lang.String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000002;
-        value_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required string value = 2;</code>
-       */
-      public Builder clearValue() {
-        bitField0_ = (bitField0_ & ~0x00000002);
-        value_ = getDefaultInstance().getValue();
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required string value = 2;</code>
-       */
-      public Builder setValueBytes(
-          com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000002;
-        value_ = value;
-        onChanged();
-        return this;
+    public final int getNumber() { return value; }
+
+    public static GroupBySupport valueOf(int value) {
+      switch (value) {
+        case 1: return GB_NONE;
+        case 2: return GB_SELECT_ONLY;
+        case 3: return GB_BEYOND_SELECT;
+        case 4: return GB_UNRELATED;
+        default: return null;
       }
+    }
 
-      // @@protoc_insertion_point(builder_scope:exec.user.Property)
+    public static com.google.protobuf.Internal.EnumLiteMap<GroupBySupport>
+        internalGetValueMap() {
+      return internalValueMap;
     }
+    private static com.google.protobuf.Internal.EnumLiteMap<GroupBySupport>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<GroupBySupport>() {
+            public GroupBySupport findValueByNumber(int number) {
+              return GroupBySupport.valueOf(number);
+            }
+          };
 
-    static {
-      defaultInstance = new Property(true);
-      defaultInstance.initFields();
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.drill.exec.proto.UserProtos.getDescriptor().getEnumTypes().get(10);
     }
 
-    // @@protoc_insertion_point(class_scope:exec.user.Property)
-  }
+    private static final GroupBySupport[] VALUES = values();
 
-  public interface UserPropertiesOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
+    public static GroupBySupport valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
 
-    // repeated .exec.user.Property properties = 1;
+    private final int index;
+    private final int value;
+
+    private GroupBySupport(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:exec.user.GroupBySupport)
+  }
+
+  /**
+   * Protobuf enum {@code exec.user.IdentifierCasing}
+   */
+  public enum IdentifierCasing
+      implements com.google.protobuf.ProtocolMessageEnum {
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>IC_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
      */
-    java.util.List<org.apache.drill.exec.proto.UserProtos.Property> 
-        getPropertiesList();
+    IC_UNKNOWN(0, 0),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>IC_STORES_LOWER = 1;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case insensitive
+     * and stored in lower case
+     * </pre>
      */
-    org.apache.drill.exec.proto.UserProtos.Property getProperties(int index);
+    IC_STORES_LOWER(1, 1),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>IC_STORES_MIXED = 2;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case insensitive
+     * and stored in mixed case
+     * </pre>
      */
-    int getPropertiesCount();
+    IC_STORES_MIXED(2, 2),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>IC_STORES_UPPER = 3;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case insensitive
+     * and stored in upper case
+     * </pre>
      */
-    java.util.List<? extends org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder> 
-        getPropertiesOrBuilderList();
+    IC_STORES_UPPER(3, 3),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>IC_SUPPORTS_MIXED = 4;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case sensitive
+     * and stored in mixed case
+     * </pre>
      */
-    org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder getPropertiesOrBuilder(
-        int index);
-  }
-  /**
-   * Protobuf type {@code exec.user.UserProperties}
-   */
-  public static final class UserProperties extends
-      com.google.protobuf.GeneratedMessage
-      implements UserPropertiesOrBuilder {
-    // Use UserProperties.newBuilder() to construct.
-    private UserProperties(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private UserProperties(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final UserProperties defaultInstance;
-    public static UserProperties getDefaultInstance() {
-      return defaultInstance;
-    }
+    IC_SUPPORTS_MIXED(4, 4),
+    ;
 
-    public UserProperties getDefaultInstanceForType() {
-      return defaultInstance;
-    }
+    /**
+     * <code>IC_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
+     */
+    public static final int IC_UNKNOWN_VALUE = 0;
+    /**
+     * <code>IC_STORES_LOWER = 1;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case insensitive
+     * and stored in lower case
+     * </pre>
+     */
+    public static final int IC_STORES_LOWER_VALUE = 1;
+    /**
+     * <code>IC_STORES_MIXED = 2;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case insensitive
+     * and stored in mixed case
+     * </pre>
+     */
+    public static final int IC_STORES_MIXED_VALUE = 2;
+    /**
+     * <code>IC_STORES_UPPER = 3;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case insensitive
+     * and stored in upper case
+     * </pre>
+     */
+    public static final int IC_STORES_UPPER_VALUE = 3;
+    /**
+     * <code>IC_SUPPORTS_MIXED = 4;</code>
+     *
+     * <pre>
+     * Mixed case identifier is treated as case sensitive
+     * and stored in mixed case
+     * </pre>
+     */
+    public static final int IC_SUPPORTS_MIXED_VALUE = 4;
 
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
+
+    public final int getNumber() { return value; }
+
+    public static IdentifierCasing valueOf(int value) {
+      switch (value) {
+        case 0: return IC_UNKNOWN;
+        case 1: return IC_STORES_LOWER;
+        case 2: return IC_STORES_MIXED;
+        case 3: return IC_STORES_UPPER;
+        case 4: return IC_SUPPORTS_MIXED;
+        default: return null;
+      }
     }
-    private UserProperties(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-                properties_ = new java.util.ArrayList<org.apache.drill.exec.proto.UserProtos.Property>();
-                mutable_bitField0_ |= 0x00000001;
-              }
-              properties_.add(input.readMessage(org.apache.drill.exec.proto.UserProtos.Property.PARSER, extensionRegistry));
-              break;
+
+    public static com.google.protobuf.Internal.EnumLiteMap<IdentifierCasing>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<IdentifierCasing>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<IdentifierCasing>() {
+            public IdentifierCasing findValueByNumber(int number) {
+              return IdentifierCasing.valueOf(number);
             }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-          properties_ = java.util.Collections.unmodifiableList(properties_);
-        }
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
     }
-    public static final com.google.protobuf.Descriptors.Descriptor
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
         getDescriptor() {
-      return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_UserProperties_descriptor;
+      return org.apache.drill.exec.proto.UserProtos.getDescriptor().getEnumTypes().get(11);
     }
 
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_UserProperties_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.drill.exec.proto.UserProtos.UserProperties.class, org.apache.drill.exec.proto.UserProtos.UserProperties.Builder.class);
-    }
+    private static final IdentifierCasing[] VALUES = values();
 
-    public static com.google.protobuf.Parser<UserProperties> PARSER =
-        new com.google.protobuf.AbstractParser<UserProperties>() {
-      public UserProperties parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new UserProperties(input, extensionRegistry);
+    public static IdentifierCasing valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
       }
-    };
+      return VALUES[desc.getIndex()];
+    }
 
-    @java.lang.Override
-    public com.google.protobuf.Parser<UserProperties> getParserForType() {
-      return PARSER;
+    private final int index;
+    private final int value;
+
+    private IdentifierCasing(int index, int value) {
+      this.index = index;
+      this.value = value;
     }
 
-    // repeated .exec.user.Property properties = 1;
-    public static final int PROPERTIES_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.drill.exec.proto.UserProtos.Property> properties_;
+    // @@protoc_insertion_point(enum_scope:exec.user.IdentifierCasing)
+  }
+
+  /**
+   * Protobuf enum {@code exec.user.NullCollation}
+   */
+  public enum NullCollation
+      implements com.google.protobuf.ProtocolMessageEnum {
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>NC_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
      */
-    public java.util.List<org.apache.drill.exec.proto.UserProtos.Property> getPropertiesList() {
-      return properties_;
-    }
+    NC_UNKNOWN(0, 0),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>NC_AT_START = 1;</code>
+     *
+     * <pre>
+     * NULL values are sorted at the start regardless of the order
+     * </pre>
      */
-    public java.util.List<? extends org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder> 
-        getPropertiesOrBuilderList() {
-      return properties_;
-    }
+    NC_AT_START(1, 1),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>NC_AT_END = 2;</code>
+     *
+     * <pre>
+     * NULL values are sorted at the end regardless of the order
+     * </pre>
      */
-    public int getPropertiesCount() {
-      return properties_.size();
-    }
+    NC_AT_END(2, 2),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>NC_HIGH = 3;</code>
+     *
+     * <pre>
+     * NULL is the highest value
+     * </pre>
      */
-    public org.apache.drill.exec.proto.UserProtos.Property getProperties(int index) {
-      return properties_.get(index);
-    }
+    NC_HIGH(3, 3),
     /**
-     * <code>repeated .exec.user.Property properties = 1;</code>
+     * <code>NC_LOW = 4;</code>
+     *
+     * <pre>
+     * NULL is the lowest value
+     * </pre>
      */
-    public org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder getPropertiesOrBuilder(
-        int index) {
-      return properties_.get(index);
-    }
-
-    private void initFields() {
-      properties_ = java.util.Collections.emptyList();
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
+    NC_LOW(4, 4),
+    ;
 
-      for (int i = 0; i < getPropertiesCount(); i++) {
-        if (!getProperties(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
+    /**
+     * <code>NC_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
+     */
+    public static final int NC_UNKNOWN_VALUE = 0;
+    /**
+     * <code>NC_AT_START = 1;</code>
+     *
+     * <pre>
+     * NULL values are sorted at the start regardless of the order
+     * </pre>
+     */
+    public static final int NC_AT_START_VALUE = 1;
+    /**
+     * <code>NC_AT_END = 2;</code>
+     *
+     * <pre>
+     * NULL values are sorted at the end regardless of the order
+     * </pre>
+     */
+    public static final int NC_AT_END_VALUE = 2;
+    /**
+     * <code>NC_HIGH = 3;</code>
+     *
+     * <pre>
+     * NULL is the highest value
+     * </pre>
+     */
+    public static final int NC_HIGH_VALUE = 3;
+    /**
+     * <code>NC_LOW = 4;</code>
+     *
+     * <pre>
+     * NULL is the lowest value
+     * </pre>
+     */
+    public static final int NC_LOW_VALUE = 4;
 
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      for (int i = 0; i < properties_.size(); i++) {
-        output.writeMessage(1, properties_.get(i));
-      }
-      getUnknownFields().writeTo(output);
-    }
 
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
+    public final int getNumber() { return value; }
 
-      size = 0;
-      for (int i = 0; i < properties_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, properties_.get(i));
+    public static NullCollation valueOf(int value) {
+      switch (value) {
+        case 0: return NC_UNKNOWN;
+        case 1: return NC_AT_START;
+        case 2: return NC_AT_END;
+        case 3: return NC_HIGH;
+        case 4: return NC_LOW;
+        default: return null;
       }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
     }
 
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
+    public static com.google.protobuf.Internal.EnumLiteMap<NullCollation>
+        internalGetValueMap() {
+      return internalValueMap;
     }
+    private static com.google.protobuf.Internal.EnumLiteMap<NullCollation>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<NullCollation>() {
+            public NullCollation findValueByNumber(int number) {
+              return NullCollation.valueOf(number);
+            }
+          };
 
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
     }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
     }
-    public static org.apache.drill.exec.proto.UserProtos.UserProperties parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.drill.exec.proto.UserProtos.getDescriptor().getEnumTypes().get(12);
     }
 
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.drill.exec.proto.UserProtos.UserProperties prototype) {
-      return newBuilder().mergeFrom(prototype);
+    private static final NullCollation[] VALUES = values();
+
+    public static NullCollation valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
     }
-    public Builder toBuilder() { return newBuilder(this); }
 
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
+    private final int index;
+    private final int value;
+
+    private NullCollation(int index, int value) {
+      this.index = index;
+      this.value = value;
     }
+
+    // @@protoc_insertion_point(enum_scope:exec.user.NullCollation)
+  }
+
+  /**
+   * Protobuf enum {@code exec.user.OrderBySupport}
+   */
+  public enum OrderBySupport
+      implements com.google.protobuf.ProtocolMessageEnum {
     /**
-     * Protobuf type {@code exec.user.UserProperties}
+     * <code>OB_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
      */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.drill.exec.proto.UserProtos.UserPropertiesOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_UserProperties_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_UserProperties_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.drill.exec.proto.UserProtos.UserProperties.class, org.apache.drill.exec.proto.UserProtos.UserProperties.Builder.class);
-      }
+    OB_UNKNOWN(0, 0),
+    /**
+     * <code>OB_UNRELATED = 1;</code>
+     *
+     * <pre>
+     * ORDER BY supported with columns not in SELECT list
+     * </pre>
+     */
+    OB_UNRELATED(1, 1),
+    /**
+     * <code>OB_EXPRESSION = 2;</code>
+     *
+     * <pre>
+     * ORDER BY with expressions is supported
+     * </pre>
+     */
+    OB_EXPRESSION(2, 2),
+    ;
 
-      // Construct using org.apache.drill.exec.proto.UserProtos.UserProperties.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
+    /**
+     * <code>OB_UNKNOWN = 0;</code>
+     *
+     * <pre>
+     * Unknown support (for forward compatibility)
+     * </pre>
+     */
+    public static final int OB_UNKNOWN_VALUE = 0;
+    /**
+     * <code>OB_UNRELATED = 1;</code>
+     *
+     * <pre>
+     * ORDER BY supported with columns not in SELECT list
+     * </pre>
+     */
+    public static final int OB_UNRELATED_VALUE = 1;
+    /**
+     * <code>OB_EXPRESSION = 2;</code>
+     *
+     * <pre>
+     * ORDER BY with expressions is supported
+     * </pre>
+     */
+    public static final int OB_EXPRESSION_VALUE = 2;
 
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getPropertiesFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
 
-      public Builder clear() {
-        super.clear();
-        if (propertiesBuilder_ == null) {
-          properties_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-        } else {
-          propertiesBuilder_.clear();
-        }
-        return this;
-      }
+    public final int getNumber() { return value; }
 
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
+    public static OrderBySupport valueOf(int value) {
+      switch (value) {
+        case 0: return OB_UNKNOWN;
+        case 1: return OB_UNRELATED;
+        case 2: return OB_EXPRESSION;
+        default: return null;
       }
+    }
 
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.drill.exec.proto.UserProtos.internal_static_exec_user_UserProperties_descriptor;
-      }
+    public static com.google.protobuf.Internal.EnumLiteMap<OrderBySupport>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<OrderBySupport>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<OrderBySupport>() {
+            public OrderBySupport findValueByNumber(int number) {
+              return OrderBySupport.valueOf(number);
+            }
+          };
 
-      public org.apache.drill.exec.proto.UserProtos.UserProperties getDefaultInstanceForType() {
-        return org.apache.drill.exec.proto.UserProtos.UserProperties.getDefaultInstance();
-      }
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.drill.exec.proto.UserProtos.getDescriptor().getEnumTypes().get(13);
+    }
 
-      public org.apache.drill.exec.proto.UserProtos.UserProperties build() {
-        org.apache.drill.exec.proto.UserProtos.UserProperties result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
+    private static final OrderBySupport[] VALUES = values();
 
-      public org.apache.drill.exec.proto.UserProtos.UserProperties buildPartial() {
-        org.apache.drill.exec.proto.UserProtos.UserProperties result = new org.apache.drill.exec.proto.UserProtos.UserProperties(this);
-        int from_bitField0_ = bitField0_;
-        if (propertiesBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001)) {
-            properties_ = java.util.Collections.unmodifiableList(properties_);
-            bitField0_ = (bitField0_ & ~0x00000001);
-          }
-          result.properties_ = properties_;
-        } else {
-          result.properties_ = propertiesBuilder_.build();
-        }
-        onBuilt();
-        return result;
+    public static OrderBySupport valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
       }
+      return VALUES[desc.getIndex()];
+    }
 
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.drill.exec.proto.UserProtos.UserProperties) {
-          return mergeFrom((org.apache.drill.exec.proto.UserProtos.UserProperties)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
+    private final int index;
+    private final int value;
 
-      public Builder mergeFrom(org.apache.drill.exec.proto.UserProtos.UserProperties other) {
-        if (other == org.apache.drill.exec.proto.UserProtos.UserProperties.getDefaultInstance()) return this;
-        if (propertiesBuilder_ == null) {
-          if (!other.properties_.isEmpty()) {
-            if (properties_.isEmpty()) {
-              properties_ = other.properties_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-            } else {
-              ensurePropertiesIsMutable();
-              properties_.addAll(other.properties_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.properties_.isEmpty()) {
-            if (propertiesBuilder_.isEmpty()) {
-              propertiesBuilder_.dispose();
-              propertiesBuilder_ = null;
-              properties_ = other.properties_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-              propertiesBuilder_ = 
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                   getPropertiesFieldBuilder() : null;
-            } else {
-              propertiesBuilder_.addAllMessages(other.properties_);
-            }
-          }
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
+    private OrderBySupport(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
 
-      public final boolean isInitialized() {
-        for (int i = 0; i < getPropertiesCount(); i++) {
-          if (!getProperties(i).isInitialized()) {
-            
-            return false;
-          }
-        }
-        return true;
-      }
+    // @@protoc_insertion_point(enum_scope:exec.user.OrderBySupport)
+  }
 
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.drill.exec.proto.UserProtos.UserProperties parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.drill.exec.proto.UserProtos.UserProperties) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // repeated .exec.user.Property properties = 1;
-      private java.util.List<org.apache.drill.exec.proto.UserProtos.Property> properties_ =
-        java.util.Collections.emptyList();
-      private void ensurePropertiesIsMutable() {
-        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-          properties_ = new java.util.ArrayList<org.apache.drill.exec.proto.UserProtos.Property>(properties_);
-          bitField0_ |= 0x00000001;
-         }
-      }
-
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.drill.exec.proto.UserProtos.Property, org.apache.drill.exec.proto.UserProtos.Property.Builder, org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder> propertiesBuilder_;
-
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public java.util.List<org.apache.drill.exec.proto.UserProtos.Property> getPropertiesList() {
-        if (propertiesBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(properties_);
-        } else {
-          return propertiesBuilder_.getMessageList();
-        }
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public int getPropertiesCount() {
-        if (propertiesBuilder_ == null) {
-          return properties_.size();
-        } else {
-          return propertiesBuilder_.getCount();
-        }
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public org.apache.drill.exec.proto.UserProtos.Property getProperties(int index) {
-        if (propertiesBuilder_ == null) {
-          return properties_.get(index);
-        } else {
-          return propertiesBuilder_.getMessage(index);
-        }
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder setProperties(
-          int index, org.apache.drill.exec.proto.UserProtos.Property value) {
-        if (propertiesBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensurePropertiesIsMutable();
-          properties_.set(index, value);
-          onChanged();
-        } else {
-          propertiesBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder setProperties(
-          int index, org.apache.drill.exec.proto.UserProtos.Property.Builder builderForValue) {
-        if (propertiesBuilder_ == null) {
-          ensurePropertiesIsMutable();
-          properties_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          propertiesBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder addProperties(org.apache.drill.exec.proto.UserProtos.Property value) {
-        if (propertiesBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensurePropertiesIsMutable();
-          properties_.add(value);
-          onChanged();
-        } else {
-          propertiesBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder addProperties(
-          int index, org.apache.drill.exec.proto.UserProtos.Property value) {
-        if (propertiesBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensurePropertiesIsMutable();
-          properties_.add(index, value);
-          onChanged();
-        } else {
-          propertiesBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder addProperties(
-          org.apache.drill.exec.proto.UserProtos.Property.Builder builderForValue) {
-        if (propertiesBuilder_ == null) {
-          ensurePropertiesIsMutable();
-          properties_.add(builderForValue.build());
-          onChanged();
-        } else {
-          propertiesBuilder_.addMessage(builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder addProperties(
-          int index, org.apache.drill.exec.proto.UserProtos.Property.Builder builderForValue) {
-        if (propertiesBuilder_ == null) {
-          ensurePropertiesIsMutable();
-          properties_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          propertiesBuilder_.addMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder addAllProperties(
-          java.lang.Iterable<? extends org.apache.drill.exec.proto.UserProtos.Property> values) {
-        if (propertiesBuilder_ == null) {
-          ensurePropertiesIsMutable();
-          super.addAll(values, properties_);
-          onChanged();
-        } else {
-          propertiesBuilder_.addAllMessages(values);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder clearProperties() {
-        if (propertiesBuilder_ == null) {
-          properties_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-          onChanged();
-        } else {
-          propertiesBuilder_.clear();
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public Builder removeProperties(int index) {
-        if (propertiesBuilder_ == null) {
-          ensurePropertiesIsMutable();
-          properties_.remove(index);
-          onChanged();
-        } else {
-          propertiesBuilder_.remove(index);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public org.apache.drill.exec.proto.UserProtos.Property.Builder getPropertiesBuilder(
-          int index) {
-        return getPropertiesFieldBuilder().getBuilder(index);
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder getPropertiesOrBuilder(
-          int index) {
-        if (propertiesBuilder_ == null) {
-          return properties_.get(index);  } else {
-          return propertiesBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public java.util.List<? extends org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder> 
-           getPropertiesOrBuilderList() {
-        if (propertiesBuilder_ != null) {
-          return propertiesBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(properties_);
-        }
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public org.apache.drill.exec.proto.UserProtos.Property.Builder addPropertiesBuilder() {
-        return getPropertiesFieldBuilder().addBuilder(
-            org.apache.drill.exec.proto.UserProtos.Property.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public org.apache.drill.exec.proto.UserProtos.Property.Builder addPropertiesBuilder(
-          int index) {
-        return getPropertiesFieldBuilder().addBuilder(
-            index, org.apache.drill.exec.proto.UserProtos.Property.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .exec.user.Property properties = 1;</code>
-       */
-      public java.util.List<org.apache.drill.exec.proto.UserProtos.Property.Builder> 
-           getPropertiesBuilderList() {
-        return getPropertiesFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.drill.exec.proto.UserProtos.Property, org.apache.drill.exec.proto.UserProtos.Property.Builder, org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder> 
-          getPropertiesFieldBuilder() {
-        if (propertiesBuilder_ == null) {
-          propertiesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.drill.exec.proto.UserProtos.Property, org.apache.drill.exec.proto.UserProtos.Property.Builder, org.apache.drill.exec.proto.UserProtos.PropertyOrBuilder>(
-                  properties_,
-                  ((bitField0_ & 0x00000001) == 0x00000001),
-                  getParentForChildren(),
-                  isClean());
-          properties_ = null;
-        }
-        return propertiesBuilder_;
-      }
-
-      // @@protoc_insertion_point(builder_scope:exec.user.UserProperties)
-    }
-
-    static {
-      defaultInstance = new UserProperties(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:exec.user.UserProperties)
-  }
-
-  public interface RpcEndpointInfosOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // optional string name = 1;
+  /**
+   * Protobuf enum {@code exec.user.OuterJoinSupport}
+   */
+  public enum OuterJoinSupport
+      implements com.google.protobuf.ProtocolMessageEnum {
     /**
-     * <code>optional string name = 1;</code>
+     * <code>OJ_UNKNOWN = 0;</code>
      *
      * <pre>
-     * example: Apache Drill Server, Apache Drill C++ client
+     * Unknown support (for forward compatibility)
      * </pre>
      */
-    boolean hasName();
+    OJ_UNKNOWN(0, 0),
     /**
-     * <code>optional string name = 1;</code>
+     * <code>OJ_LEFT = 1;</code>
      *
      * <pre>
-     * example: Apache Drill Server, Apache Drill C++ client
+     * Left outer join is supported
      * </pre>
      */
-    java.lang.String getName();
+    OJ_LEFT(1, 1),
     /**
-     * <code>optional string name = 1;</code>
+     * <code>OJ_RIGHT = 2;</code>
      *
      * <pre>
-     * example: Apache Drill Server, Apache Drill C++ client
+     * Right outer join is supported
      * </pre>
      */
-    com.google.protobuf.ByteString
-        getNameBytes();
-
-    // optional string version = 2;
+    OJ_RIGHT(2, 2),
     /**
-     * <code>optional string version = 2;</code>
+     * <code>OJ_FULL = 3;</code>
      *
      * <pre>
-     * example: 1.9.0
+     * Full outer join is supported
      * </pre>
      */
-    boolean hasVersion();
+    OJ_FULL(3, 3),
     /**
-     * <code>optional string version = 2;</code>
+     * <code>OJ_NESTED = 4;</code>
      *
      * <pre>
-     * example: 1.9.0
+     * Nested outer join is supported
      * </pre>
      */
-    java.lang.String getVersion();
+    OJ_NESTED(4, 4),
     /**
-     * <code>optional string version = 2;</code>
+     * <code>OJ_NOT_ORDERED = 5;</code>
      *
      * <pre>
-     * example: 1.9.0
+     * Column names in the ON clause don't have to share the same order
+     * as their respective table names in the OUTER JOIN clause
      * </pre>
      */
-    com.google.protobuf.ByteString
-        getVersionBytes();
-
-    // optional uint32 majorVersion = 3;
+    OJ_NOT_ORDERED(5, 5),
     /**
-     * <code>optional uint32 majorVersion = 3;</code>
+     * <code>OJ_INNER = 6;</code>
      *
      * <pre>
-     * example: 1
+     * Inner table can also be used in an inner join
      * </pre>
      */
-    boolean hasMajorVersion();
+    OJ_INNER(6, 6),
     /**
-     * <code>optional uint32 majorVersion = 3;</code>
+     * <code>OJ_ALL_COMPARISON_OPS = 7;</code>
      *
      * <pre>
-     * example: 1
+     * Any comparison operator is supported in the ON clause
      * </pre>
      */
-    int getMajorVersion();
+    OJ_ALL_COMPARISON_OPS(7, 7),
+    ;
 
-    // optional uint32 minorVersion = 4;
     /**
-     * <code>optional uint32 minorVersion = 4;</code>
+     * <code>OJ_UNKNOWN = 0;</code>
      *
      * <pre>
-     * example: 9
+     * Unknown support (for forward compatibility)
      * </pre>
      */
-    boolean hasMinorVersion();
+    public static final int OJ_UNKNOWN_VALUE = 0;
     /**
-     * <code>optional uint32 minorVersion = 4;</code>
+     * <code>OJ_LEFT = 1;</code>
      *
      * <pre>
-     * example: 9
+     * Left outer join is supported
      * </pre>
      */
-    int getMinorVersion();
-
-    // optional uint32 patchVersion = 5;
+    public static final int OJ_LEFT_VALUE = 1;
     /**
-     * <code>optional uint32 patchVersion = 5;</code>
+     * <code>OJ_RIGHT = 2;</code>
      *
      * <pre>
-     * example: 0
+     * Right outer join is supported
      * </pre>
      */
-    boolean hasPatchVersion();
+    public static final int OJ_RIGHT_VALUE = 2;
     /**
-     * <code>optional uint32 patchVersion = 5;</code>
+     * <code>OJ_FULL = 3;</code>
      *
      * <pre>
-     * example: 0
+     * Full outer join is supported
      * </pre>
      */
-    int getPatchVersion();
-
-    // optional string application = 6;
+    public static final int OJ_FULL_VALUE = 3;
     /**
-     * <code>optional string application = 6;</code>
+     * <code>OJ_NESTED = 4;</code>
      *
      * <pre>
-     * example: Tableau 9.3
+     * Nested outer join is supported
      * </pre>
      */
-    boolean hasApplication();
+    public static final int OJ_NESTED_VALUE = 4;
     /**
-     * <code>optional string application = 6;</code>
+     * <code>OJ_NOT_ORDERED = 5;</code>
      *
      * <pre>
-     * example: Tableau 9.3
+     * Column names in the ON clause don't have to share the same order
+     * as their respective table names in the OUTER JOIN clause
      * </pre>
      */
-    java.lang.String getApplication();
+    public static final int OJ_NOT_ORDERED_VALUE = 5;
     /**
-     * <code>optional string application = 6;</code>
+     * <code>OJ_INNER = 6;</code>
      *
      * <pre>
-     * example: Tableau 9.3
+     * Inner table can also be used in an inner join
      * </pre>
      */
-    com.google.protobuf.ByteString
-        getApplicationBytes();
+    public static final int OJ_INNER_VALUE = 6;
+    /**
+     * <code>OJ_ALL_COMPARISON_OPS = 7;</code>
+     *
+     * <pre>
+     * Any comparison operator is supported in the ON clause
+     * </pre>
+     */
+    public static final int OJ_ALL_COMPARISON_OPS_VALUE = 7;
 
-    // optional uint32 buildNumber = 7;
+
+    public final int getNumber() { return value; }
+
+    public static OuterJoinSupport valueOf(int value) {
+      switch (value) {
+        case 0: return OJ_UNKNOWN;
+        case 1: return OJ_LEFT;
+        case 2: return OJ_RIGHT;
+        case 3: return OJ_FULL;
+        case 4: return OJ_NESTED;
+        case 5: return OJ_NOT_ORDERED;
+        case 6: return OJ_INNER;
+        case 7: return OJ_ALL_COMPARISON_OPS;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<OuterJoinSupport>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<OuterJoinSupport>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<OuterJoinSupport>() {
+            public OuterJoinSupport findValueByNumber(int number) {
+              return OuterJoinSupport.valueOf(number);
+           

<TRUNCATED>
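
The generated enums in the patch above all follow the same pattern: valueOf(int) maps a wire value to an enum constant and returns null for numbers this client build does not recognize, which is how forward compatibility is handled. Below is a minimal sketch of how calling code might consume one of them; the class name GroupBySupportCheck and the serverGroupBySupport variable are hypothetical stand-ins (the raw value would normally come from a server metadata response), while GroupBySupport.valueOf(int) and the GB_* constants are taken directly from the generated code shown here.

import org.apache.drill.exec.proto.UserProtos.GroupBySupport;

public class GroupBySupportCheck {
  public static void main(String[] args) {
    // Hypothetical raw value, as it would arrive in a server metadata response.
    int serverGroupBySupport = GroupBySupport.GB_SELECT_ONLY_VALUE;

    // valueOf(int) returns null for unrecognized numbers, so a null check
    // covers servers newer than this client.
    GroupBySupport support = GroupBySupport.valueOf(serverGroupBySupport);
    if (support == null) {
      System.out.println("Unrecognized GROUP BY support level");
      return;
    }
    switch (support) {
      case GB_NONE:
        System.out.println("GROUP BY is not supported");
        break;
      case GB_SELECT_ONLY:
        System.out.println("GROUP BY limited to non-aggregated columns in SELECT");
        break;
      case GB_BEYOND_SELECT:
      case GB_UNRELATED:
        System.out.println("GROUP BY may reference columns outside the SELECT list");
        break;
    }
  }
}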

[09/27] drill git commit: DRILL-5301: Add C++ client support for Server metadata API

Posted by jn...@apache.org.
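
The diff below moves the static DrillMetadata class members into an anonymous namespace as file-local constants built with boost::assign::list_of, with defaults based on Drill 1.8 support. For readers unfamiliar with that idiom, here is a minimal, self-contained sketch of the same pattern; the names s_exampleFunctions and the value list are illustrative only, not from the Drill sources.

#include <iostream>
#include <string>
#include <vector>

#include <boost/assign/list_of.hpp>

namespace { // anonymous namespace: internal linkage, visible only in this file
static const std::vector<std::string> s_exampleFunctions = boost::assign::list_of
		("ABS")("EXP")("LOG")("MOD");
} // anonymous namespace

int main() {
    for (std::vector<std::string>::const_iterator it = s_exampleFunctions.begin();
            it != s_exampleFunctions.end(); ++it) {
        std::cout << *it << '\n';
    }
    return 0;
}

One apparent motivation for the move: constants with internal linkage no longer need to be declared in the public header, so the default tables can change without touching the DrillMetadata interface.
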
http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/src/clientlib/metadata.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/metadata.cpp b/contrib/native/client/src/clientlib/metadata.cpp
index 0364c7d..7c2a7a8 100644
--- a/contrib/native/client/src/clientlib/metadata.cpp
+++ b/contrib/native/client/src/clientlib/metadata.cpp
@@ -26,11 +26,18 @@
 const std::string Drill::meta::DrillMetadata::s_connectorName(DRILL_CONNECTOR_NAME);
 const std::string Drill::meta::DrillMetadata::s_connectorVersion(DRILL_VERSION_STRING);
 
-const std::string Drill::meta::DrillMetadata::s_catalogSeparator(".");
-const std::string Drill::meta::DrillMetadata::s_catalogTerm("catalog");
-const std::string Drill::meta::DrillMetadata::s_identifierQuoteString("`");
+namespace Drill {
+namespace meta {
+namespace { // Anonymous namespace
+using boost::assign::list_of;
+
+// Default values based on Drill 1.8 support
+static const std::size_t s_maxIdentifierSize = 1024;
+static const std::string s_catalogSeparator(".");
+static const std::string s_catalogTerm("catalog");
+static const std::string s_identifierQuoteString("`");
 
-const std::vector<std::string> Drill::meta::DrillMetadata::s_sqlKeywords = boost::assign::list_of
+static const std::vector<std::string> s_sqlKeywords = boost::assign::list_of
 		("ABS")("ALLOW")("ARRAY")("ASENSITIVE")("ASYMMETRIC")("ATOMIC")("BIGINT")("BINARY")("BLOB")
 		("BOOLEAN")("CALL")("CALLED")("CARDINALITY")("CEIL")("CEILING")("CLOB")("COLLECT")("CONDITION")
 		("CORR")("COVAR_POP")("COVAR_SAMP")("CUBE")("CUME_DIST")("CURRENT_CATALOG")
@@ -51,643 +58,1070 @@ const std::vector<std::string> Drill::meta::DrillMetadata::s_sqlKeywords = boost
 		("UESCAPE")("UNNEST")("UPSERT")("USE")("VARBINARY")("VAR_POP")("VAR_SAMP")("WIDTH_BUCKET")
 		("WINDOW")("WITHIN")("WITHOUT");
 
-const std::vector<std::string> Drill::meta::DrillMetadata::s_numericFunctions = boost::assign::list_of
-		("ABS")("ACOS")("ASIN")("ATAN")("ATAN2")("CEILING")("COS")("COT")
-		("DEGREES")("EXP")("FLOOR")("LOG")("LOG10")("MOD")("PI")
-		("POWER")("RADIANS")("RAND")("ROUND")("SIGN")("SIN")("SQRT")
-		("TAN")("TRUNCATE");
+static const std::vector<std::string> s_numericFunctions = boost::assign::list_of
+		("ABS")("EXP")("LOG")("LOG10")("MOD")("POWER");
 
-const std::string Drill::meta::DrillMetadata::s_schemaTerm("schema");
-const std::string Drill::meta::DrillMetadata::s_searchEscapeString("\\");
-const std::string Drill::meta::DrillMetadata::s_specialCharacters;
+static const std::string s_schemaTerm("schema");
+static const std::string s_searchEscapeString("\\");
+static const std::string s_specialCharacters;
 
-const std::vector<std::string> Drill::meta::DrillMetadata::s_stringFunctions = boost::assign::list_of
-		("ASCII")("CHAR")("CONCAT")("DIFFERENCE")("INSERT")("LCASE")
-		("LEFT")("LENGTH")("LOCATE")("LTRIM")("REPEAT")("REPLACE")
-		("RIGHT")("RTRIM")("SOUNDEX")("SPACE")("SUBSTRING")("UCASE");
+static const std::vector<std::string> s_stringFunctions = boost::assign::list_of
+		("CONCAT")("INSERT")("LCASE")("LENGTH")("LOCATE")("LTRIM")("RTRIM")("SUBSTRING")("UCASE");
 
-const std::vector<std::string> Drill::meta::DrillMetadata::s_systemFunctions = boost::assign::list_of
-		("DATABASE")("IFNULL")("USER");
+static const std::vector<std::string> s_systemFunctions;
 
-const std::string Drill::meta::DrillMetadata::s_tableTerm("table");
+static const std::string s_tableTerm("table");
 
-const std::vector<std::string> Drill::meta::DrillMetadata::s_dateTimeFunctions = boost::assign::list_of
-		("CURDATE")("CURTIME")("DAYNAME")("DAYOFMONTH")("DAYOFWEEK")
-		("DAYOFYEAR")("HOUR")("MINUTE")("MONTH")("MONTHNAME")("NOW")
-		("QUARTER")("SECOND")("TIMESTAMPADD")("TIMESTAMPDIFF")("WEEK")("YEAR");
+static const std::vector<std::string> s_dateTimeFunctions = boost::assign::list_of
+		("CURDATE")("CURTIME")("NOW")("QUARTER");
 
-namespace Drill {
-namespace meta {
-namespace {
-using boost::assign::list_of;
+static const std::vector<exec::user::DateTimeLiteralsSupport> s_dateTimeLiterals = boost::assign::list_of
+		(exec::user::DL_DATE)(exec::user::DL_TIME)(exec::user::DL_TIMESTAMP)(exec::user::DL_INTERVAL_YEAR)
+		(exec::user::DL_INTERVAL_MONTH)(exec::user::DL_INTERVAL_DAY)(exec::user::DL_INTERVAL_HOUR)
+		(exec::user::DL_INTERVAL_MINUTE)(exec::user::DL_INTERVAL_SECOND)(exec::user::DL_INTERVAL_YEAR_TO_MONTH)
+		(exec::user::DL_INTERVAL_DAY_TO_HOUR)(exec::user::DL_INTERVAL_DAY_TO_MINUTE)
+		(exec::user::DL_INTERVAL_DAY_TO_SECOND)(exec::user::DL_INTERVAL_HOUR_TO_MINUTE)
+		(exec::user::DL_INTERVAL_HOUR_TO_SECOND)(exec::user::DL_INTERVAL_MINUTE_TO_SECOND);
+
+static const std::vector<exec::user::OrderBySupport> s_orderBySupport = boost::assign::list_of
+		(exec::user::OB_UNRELATED)(exec::user::OB_EXPRESSION);
+
+static const std::vector<exec::user::OuterJoinSupport> s_outerJoinSupport = boost::assign::list_of
+		(exec::user::OJ_LEFT)(exec::user::OJ_RIGHT)(exec::user::OJ_FULL);
+
+static const std::vector<exec::user::SubQuerySupport> s_subQuerySupport = boost::assign::list_of
+		(exec::user::SQ_CORRELATED)(exec::user::SQ_IN_COMPARISON)(exec::user::SQ_IN_EXISTS)
+		(exec::user::SQ_IN_QUANTIFIED);
+
+static const std::vector<exec::user::UnionSupport> s_unionSupport = boost::assign::list_of
+		(exec::user::U_UNION)(exec::user::U_UNION_ALL);
+
+static exec::user::ConvertSupport ConvertSupport(common::MinorType from, common::MinorType to) {
+	exec::user::ConvertSupport convertSupport;
+	convertSupport.set_from(from);
+	convertSupport.set_to(to);
+
+	return convertSupport;
+}
+
+static const convert_support_set s_convertMap = boost::assign::list_of
+		(ConvertSupport(common::TINYINT, common::INT))
+		(ConvertSupport(common::TINYINT, common::BIGINT))
+		(ConvertSupport(common::TINYINT, common::DECIMAL9))
+		(ConvertSupport(common::TINYINT, common::DECIMAL18))
+		(ConvertSupport(common::TINYINT, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::TINYINT, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::TINYINT, common::DATE))
+		(ConvertSupport(common::TINYINT, common::TIME))
+		(ConvertSupport(common::TINYINT, common::TIMESTAMP))
+		(ConvertSupport(common::TINYINT, common::INTERVAL))
+		(ConvertSupport(common::TINYINT, common::FLOAT4))
+		(ConvertSupport(common::TINYINT, common::FLOAT8))
+		(ConvertSupport(common::TINYINT, common::BIT))
+		(ConvertSupport(common::TINYINT, common::VARCHAR))
+		(ConvertSupport(common::TINYINT, common::VAR16CHAR))
+		(ConvertSupport(common::TINYINT, common::VARBINARY))
+		(ConvertSupport(common::TINYINT, common::INTERVALYEAR))
+		(ConvertSupport(common::TINYINT, common::INTERVALDAY))
+		(ConvertSupport(common::SMALLINT, common::INT))
+		(ConvertSupport(common::SMALLINT, common::BIGINT))
+		(ConvertSupport(common::SMALLINT, common::DECIMAL9))
+		(ConvertSupport(common::SMALLINT, common::DECIMAL18))
+		(ConvertSupport(common::SMALLINT, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::SMALLINT, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::SMALLINT, common::DATE))
+		(ConvertSupport(common::SMALLINT, common::TIME))
+		(ConvertSupport(common::SMALLINT, common::TIMESTAMP))
+		(ConvertSupport(common::SMALLINT, common::INTERVAL))
+		(ConvertSupport(common::SMALLINT, common::FLOAT4))
+		(ConvertSupport(common::SMALLINT, common::FLOAT8))
+		(ConvertSupport(common::SMALLINT, common::BIT))
+		(ConvertSupport(common::SMALLINT, common::VARCHAR))
+		(ConvertSupport(common::SMALLINT, common::VAR16CHAR))
+		(ConvertSupport(common::SMALLINT, common::VARBINARY))
+		(ConvertSupport(common::SMALLINT, common::INTERVALYEAR))
+		(ConvertSupport(common::SMALLINT, common::INTERVALDAY))
+		(ConvertSupport(common::INT, common::INT))
+		(ConvertSupport(common::INT, common::BIGINT))
+		(ConvertSupport(common::INT, common::DECIMAL9))
+		(ConvertSupport(common::INT, common::DECIMAL18))
+		(ConvertSupport(common::INT, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::INT, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::INT, common::DATE))
+		(ConvertSupport(common::INT, common::TIME))
+		(ConvertSupport(common::INT, common::TIMESTAMP))
+		(ConvertSupport(common::INT, common::INTERVAL))
+		(ConvertSupport(common::INT, common::FLOAT4))
+		(ConvertSupport(common::INT, common::FLOAT8))
+		(ConvertSupport(common::INT, common::BIT))
+		(ConvertSupport(common::INT, common::VARCHAR))
+		(ConvertSupport(common::INT, common::VAR16CHAR))
+		(ConvertSupport(common::INT, common::VARBINARY))
+		(ConvertSupport(common::INT, common::INTERVALYEAR))
+		(ConvertSupport(common::INT, common::INTERVALDAY))
+		(ConvertSupport(common::BIGINT, common::INT))
+		(ConvertSupport(common::BIGINT, common::BIGINT))
+		(ConvertSupport(common::BIGINT, common::DECIMAL9))
+		(ConvertSupport(common::BIGINT, common::DECIMAL18))
+		(ConvertSupport(common::BIGINT, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::BIGINT, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::BIGINT, common::DATE))
+		(ConvertSupport(common::BIGINT, common::TIME))
+		(ConvertSupport(common::BIGINT, common::TIMESTAMP))
+		(ConvertSupport(common::BIGINT, common::INTERVAL))
+		(ConvertSupport(common::BIGINT, common::FLOAT4))
+		(ConvertSupport(common::BIGINT, common::FLOAT8))
+		(ConvertSupport(common::BIGINT, common::BIT))
+		(ConvertSupport(common::BIGINT, common::VARCHAR))
+		(ConvertSupport(common::BIGINT, common::VAR16CHAR))
+		(ConvertSupport(common::BIGINT, common::VARBINARY))
+		(ConvertSupport(common::BIGINT, common::INTERVALYEAR))
+		(ConvertSupport(common::BIGINT, common::INTERVALDAY))
+		(ConvertSupport(common::DECIMAL9, common::INT))
+		(ConvertSupport(common::DECIMAL9, common::BIGINT))
+		(ConvertSupport(common::DECIMAL9, common::DECIMAL9))
+		(ConvertSupport(common::DECIMAL9, common::DECIMAL18))
+		(ConvertSupport(common::DECIMAL9, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DECIMAL9, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DECIMAL9, common::DATE))
+		(ConvertSupport(common::DECIMAL9, common::TIME))
+		(ConvertSupport(common::DECIMAL9, common::TIMESTAMP))
+		(ConvertSupport(common::DECIMAL9, common::INTERVAL))
+		(ConvertSupport(common::DECIMAL9, common::FLOAT4))
+		(ConvertSupport(common::DECIMAL9, common::FLOAT8))
+		(ConvertSupport(common::DECIMAL9, common::BIT))
+		(ConvertSupport(common::DECIMAL9, common::VARCHAR))
+		(ConvertSupport(common::DECIMAL9, common::VAR16CHAR))
+		(ConvertSupport(common::DECIMAL9, common::VARBINARY))
+		(ConvertSupport(common::DECIMAL9, common::INTERVALYEAR))
+		(ConvertSupport(common::DECIMAL9, common::INTERVALDAY))
+		(ConvertSupport(common::DECIMAL18, common::INT))
+		(ConvertSupport(common::DECIMAL18, common::BIGINT))
+		(ConvertSupport(common::DECIMAL18, common::DECIMAL9))
+		(ConvertSupport(common::DECIMAL18, common::DECIMAL18))
+		(ConvertSupport(common::DECIMAL18, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DECIMAL18, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DECIMAL18, common::DATE))
+		(ConvertSupport(common::DECIMAL18, common::TIME))
+		(ConvertSupport(common::DECIMAL18, common::TIMESTAMP))
+		(ConvertSupport(common::DECIMAL18, common::INTERVAL))
+		(ConvertSupport(common::DECIMAL18, common::FLOAT4))
+		(ConvertSupport(common::DECIMAL18, common::FLOAT8))
+		(ConvertSupport(common::DECIMAL18, common::BIT))
+		(ConvertSupport(common::DECIMAL18, common::VARCHAR))
+		(ConvertSupport(common::DECIMAL18, common::VAR16CHAR))
+		(ConvertSupport(common::DECIMAL18, common::VARBINARY))
+		(ConvertSupport(common::DECIMAL18, common::INTERVALYEAR))
+		(ConvertSupport(common::DECIMAL18, common::INTERVALDAY))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::INT))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::BIGINT))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::DECIMAL9))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::DECIMAL18))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::DATE))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::TIME))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::TIMESTAMP))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::INTERVAL))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::FLOAT4))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::FLOAT8))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::BIT))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::VARCHAR))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::VAR16CHAR))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::VARBINARY))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::INTERVALYEAR))
+		(ConvertSupport(common::DECIMAL28SPARSE, common::INTERVALDAY))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::INT))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::BIGINT))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::DECIMAL9))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::DECIMAL18))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::DATE))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::TIME))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::TIMESTAMP))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::INTERVAL))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::FLOAT4))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::FLOAT8))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::BIT))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::VARCHAR))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::VAR16CHAR))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::VARBINARY))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::INTERVALYEAR))
+		(ConvertSupport(common::DECIMAL38SPARSE, common::INTERVALDAY))
+		(ConvertSupport(common::MONEY, common::INT))
+		(ConvertSupport(common::MONEY, common::BIGINT))
+		(ConvertSupport(common::MONEY, common::DECIMAL9))
+		(ConvertSupport(common::MONEY, common::DECIMAL18))
+		(ConvertSupport(common::MONEY, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::MONEY, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::MONEY, common::DATE))
+		(ConvertSupport(common::MONEY, common::TIME))
+		(ConvertSupport(common::MONEY, common::TIMESTAMP))
+		(ConvertSupport(common::MONEY, common::INTERVAL))
+		(ConvertSupport(common::MONEY, common::FLOAT4))
+		(ConvertSupport(common::MONEY, common::FLOAT8))
+		(ConvertSupport(common::MONEY, common::BIT))
+		(ConvertSupport(common::MONEY, common::VARCHAR))
+		(ConvertSupport(common::MONEY, common::VAR16CHAR))
+		(ConvertSupport(common::MONEY, common::VARBINARY))
+		(ConvertSupport(common::MONEY, common::INTERVALYEAR))
+		(ConvertSupport(common::MONEY, common::INTERVALDAY))
+		(ConvertSupport(common::DATE, common::INT))
+		(ConvertSupport(common::DATE, common::BIGINT))
+		(ConvertSupport(common::DATE, common::DECIMAL9))
+		(ConvertSupport(common::DATE, common::DECIMAL18))
+		(ConvertSupport(common::DATE, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DATE, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DATE, common::DATE))
+		(ConvertSupport(common::DATE, common::TIME))
+		(ConvertSupport(common::DATE, common::TIMESTAMP))
+		(ConvertSupport(common::DATE, common::INTERVAL))
+		(ConvertSupport(common::DATE, common::FLOAT4))
+		(ConvertSupport(common::DATE, common::FLOAT8))
+		(ConvertSupport(common::DATE, common::BIT))
+		(ConvertSupport(common::DATE, common::VARCHAR))
+		(ConvertSupport(common::DATE, common::VAR16CHAR))
+		(ConvertSupport(common::DATE, common::VARBINARY))
+		(ConvertSupport(common::DATE, common::INTERVALYEAR))
+		(ConvertSupport(common::DATE, common::INTERVALDAY))
+		(ConvertSupport(common::TIME, common::INT))
+		(ConvertSupport(common::TIME, common::BIGINT))
+		(ConvertSupport(common::TIME, common::DECIMAL9))
+		(ConvertSupport(common::TIME, common::DECIMAL18))
+		(ConvertSupport(common::TIME, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::TIME, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::TIME, common::DATE))
+		(ConvertSupport(common::TIME, common::TIME))
+		(ConvertSupport(common::TIME, common::TIMESTAMP))
+		(ConvertSupport(common::TIME, common::INTERVAL))
+		(ConvertSupport(common::TIME, common::FLOAT4))
+		(ConvertSupport(common::TIME, common::FLOAT8))
+		(ConvertSupport(common::TIME, common::BIT))
+		(ConvertSupport(common::TIME, common::VARCHAR))
+		(ConvertSupport(common::TIME, common::VAR16CHAR))
+		(ConvertSupport(common::TIME, common::VARBINARY))
+		(ConvertSupport(common::TIME, common::INTERVALYEAR))
+		(ConvertSupport(common::TIME, common::INTERVALDAY))
+		(ConvertSupport(common::TIMESTAMPTZ, common::INT))
+		(ConvertSupport(common::TIMESTAMPTZ, common::BIGINT))
+		(ConvertSupport(common::TIMESTAMPTZ, common::DECIMAL9))
+		(ConvertSupport(common::TIMESTAMPTZ, common::DECIMAL18))
+		(ConvertSupport(common::TIMESTAMPTZ, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::TIMESTAMPTZ, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::TIMESTAMPTZ, common::DATE))
+		(ConvertSupport(common::TIMESTAMPTZ, common::TIME))
+		(ConvertSupport(common::TIMESTAMPTZ, common::TIMESTAMP))
+		(ConvertSupport(common::TIMESTAMPTZ, common::INTERVAL))
+		(ConvertSupport(common::TIMESTAMPTZ, common::FLOAT4))
+		(ConvertSupport(common::TIMESTAMPTZ, common::FLOAT8))
+		(ConvertSupport(common::TIMESTAMPTZ, common::BIT))
+		(ConvertSupport(common::TIMESTAMPTZ, common::VARCHAR))
+		(ConvertSupport(common::TIMESTAMPTZ, common::VAR16CHAR))
+		(ConvertSupport(common::TIMESTAMPTZ, common::VARBINARY))
+		(ConvertSupport(common::TIMESTAMPTZ, common::INTERVALYEAR))
+		(ConvertSupport(common::TIMESTAMPTZ, common::INTERVALDAY))
+		(ConvertSupport(common::TIMESTAMP, common::INT))
+		(ConvertSupport(common::TIMESTAMP, common::BIGINT))
+		(ConvertSupport(common::TIMESTAMP, common::DECIMAL9))
+		(ConvertSupport(common::TIMESTAMP, common::DECIMAL18))
+		(ConvertSupport(common::TIMESTAMP, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::TIMESTAMP, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::TIMESTAMP, common::DATE))
+		(ConvertSupport(common::TIMESTAMP, common::TIME))
+		(ConvertSupport(common::TIMESTAMP, common::TIMESTAMP))
+		(ConvertSupport(common::TIMESTAMP, common::INTERVAL))
+		(ConvertSupport(common::TIMESTAMP, common::FLOAT4))
+		(ConvertSupport(common::TIMESTAMP, common::FLOAT8))
+		(ConvertSupport(common::TIMESTAMP, common::BIT))
+		(ConvertSupport(common::TIMESTAMP, common::VARCHAR))
+		(ConvertSupport(common::TIMESTAMP, common::VAR16CHAR))
+		(ConvertSupport(common::TIMESTAMP, common::VARBINARY))
+		(ConvertSupport(common::TIMESTAMP, common::INTERVALYEAR))
+		(ConvertSupport(common::TIMESTAMP, common::INTERVALDAY))
+		(ConvertSupport(common::INTERVAL, common::INT))
+		(ConvertSupport(common::INTERVAL, common::BIGINT))
+		(ConvertSupport(common::INTERVAL, common::DECIMAL9))
+		(ConvertSupport(common::INTERVAL, common::DECIMAL18))
+		(ConvertSupport(common::INTERVAL, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::INTERVAL, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::INTERVAL, common::DATE))
+		(ConvertSupport(common::INTERVAL, common::TIME))
+		(ConvertSupport(common::INTERVAL, common::TIMESTAMP))
+		(ConvertSupport(common::INTERVAL, common::INTERVAL))
+		(ConvertSupport(common::INTERVAL, common::FLOAT4))
+		(ConvertSupport(common::INTERVAL, common::FLOAT8))
+		(ConvertSupport(common::INTERVAL, common::BIT))
+		(ConvertSupport(common::INTERVAL, common::VARCHAR))
+		(ConvertSupport(common::INTERVAL, common::VAR16CHAR))
+		(ConvertSupport(common::INTERVAL, common::VARBINARY))
+		(ConvertSupport(common::INTERVAL, common::INTERVALYEAR))
+		(ConvertSupport(common::INTERVAL, common::INTERVALDAY))
+		(ConvertSupport(common::FLOAT4, common::INT))
+		(ConvertSupport(common::FLOAT4, common::BIGINT))
+		(ConvertSupport(common::FLOAT4, common::DECIMAL9))
+		(ConvertSupport(common::FLOAT4, common::DECIMAL18))
+		(ConvertSupport(common::FLOAT4, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::FLOAT4, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::FLOAT4, common::DATE))
+		(ConvertSupport(common::FLOAT4, common::TIME))
+		(ConvertSupport(common::FLOAT4, common::TIMESTAMP))
+		(ConvertSupport(common::FLOAT4, common::INTERVAL))
+		(ConvertSupport(common::FLOAT4, common::FLOAT4))
+		(ConvertSupport(common::FLOAT4, common::FLOAT8))
+		(ConvertSupport(common::FLOAT4, common::BIT))
+		(ConvertSupport(common::FLOAT4, common::VARCHAR))
+		(ConvertSupport(common::FLOAT4, common::VAR16CHAR))
+		(ConvertSupport(common::FLOAT4, common::VARBINARY))
+		(ConvertSupport(common::FLOAT4, common::INTERVALYEAR))
+		(ConvertSupport(common::FLOAT4, common::INTERVALDAY))
+		(ConvertSupport(common::FLOAT8, common::INT))
+		(ConvertSupport(common::FLOAT8, common::BIGINT))
+		(ConvertSupport(common::FLOAT8, common::DECIMAL9))
+		(ConvertSupport(common::FLOAT8, common::DECIMAL18))
+		(ConvertSupport(common::FLOAT8, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::FLOAT8, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::FLOAT8, common::DATE))
+		(ConvertSupport(common::FLOAT8, common::TIME))
+		(ConvertSupport(common::FLOAT8, common::TIMESTAMP))
+		(ConvertSupport(common::FLOAT8, common::INTERVAL))
+		(ConvertSupport(common::FLOAT8, common::FLOAT4))
+		(ConvertSupport(common::FLOAT8, common::FLOAT8))
+		(ConvertSupport(common::FLOAT8, common::BIT))
+		(ConvertSupport(common::FLOAT8, common::VARCHAR))
+		(ConvertSupport(common::FLOAT8, common::VAR16CHAR))
+		(ConvertSupport(common::FLOAT8, common::VARBINARY))
+		(ConvertSupport(common::FLOAT8, common::INTERVALYEAR))
+		(ConvertSupport(common::FLOAT8, common::INTERVALDAY))
+		(ConvertSupport(common::BIT, common::TINYINT))
+		(ConvertSupport(common::BIT, common::INT))
+		(ConvertSupport(common::BIT, common::BIGINT))
+		(ConvertSupport(common::BIT, common::DECIMAL9))
+		(ConvertSupport(common::BIT, common::DECIMAL18))
+		(ConvertSupport(common::BIT, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::BIT, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::BIT, common::DATE))
+		(ConvertSupport(common::BIT, common::TIME))
+		(ConvertSupport(common::BIT, common::TIMESTAMP))
+		(ConvertSupport(common::BIT, common::INTERVAL))
+		(ConvertSupport(common::BIT, common::FLOAT4))
+		(ConvertSupport(common::BIT, common::FLOAT8))
+		(ConvertSupport(common::BIT, common::BIT))
+		(ConvertSupport(common::BIT, common::VARCHAR))
+		(ConvertSupport(common::BIT, common::VAR16CHAR))
+		(ConvertSupport(common::BIT, common::VARBINARY))
+		(ConvertSupport(common::BIT, common::INTERVALYEAR))
+		(ConvertSupport(common::BIT, common::INTERVALDAY))
+		(ConvertSupport(common::FIXEDCHAR, common::TINYINT))
+		(ConvertSupport(common::FIXEDCHAR, common::INT))
+		(ConvertSupport(common::FIXEDCHAR, common::BIGINT))
+		(ConvertSupport(common::FIXEDCHAR, common::DECIMAL9))
+		(ConvertSupport(common::FIXEDCHAR, common::DECIMAL18))
+		(ConvertSupport(common::FIXEDCHAR, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::FIXEDCHAR, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::FIXEDCHAR, common::DATE))
+		(ConvertSupport(common::FIXEDCHAR, common::TIME))
+		(ConvertSupport(common::FIXEDCHAR, common::TIMESTAMP))
+		(ConvertSupport(common::FIXEDCHAR, common::INTERVAL))
+		(ConvertSupport(common::FIXEDCHAR, common::FLOAT4))
+		(ConvertSupport(common::FIXEDCHAR, common::FLOAT8))
+		(ConvertSupport(common::FIXEDCHAR, common::BIT))
+		(ConvertSupport(common::FIXEDCHAR, common::VARCHAR))
+		(ConvertSupport(common::FIXEDCHAR, common::VAR16CHAR))
+		(ConvertSupport(common::FIXEDCHAR, common::VARBINARY))
+		(ConvertSupport(common::FIXEDCHAR, common::INTERVALYEAR))
+		(ConvertSupport(common::FIXEDCHAR, common::INTERVALDAY))
+		(ConvertSupport(common::FIXED16CHAR, common::TINYINT))
+		(ConvertSupport(common::FIXED16CHAR, common::INT))
+		(ConvertSupport(common::FIXED16CHAR, common::BIGINT))
+		(ConvertSupport(common::FIXED16CHAR, common::DECIMAL9))
+		(ConvertSupport(common::FIXED16CHAR, common::DECIMAL18))
+		(ConvertSupport(common::FIXED16CHAR, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::FIXED16CHAR, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::FIXED16CHAR, common::DATE))
+		(ConvertSupport(common::FIXED16CHAR, common::TIME))
+		(ConvertSupport(common::FIXED16CHAR, common::TIMESTAMP))
+		(ConvertSupport(common::FIXED16CHAR, common::INTERVAL))
+		(ConvertSupport(common::FIXED16CHAR, common::FLOAT4))
+		(ConvertSupport(common::FIXED16CHAR, common::FLOAT8))
+		(ConvertSupport(common::FIXED16CHAR, common::BIT))
+		(ConvertSupport(common::FIXED16CHAR, common::VARCHAR))
+		(ConvertSupport(common::FIXED16CHAR, common::VAR16CHAR))
+		(ConvertSupport(common::FIXED16CHAR, common::VARBINARY))
+		(ConvertSupport(common::FIXED16CHAR, common::INTERVALYEAR))
+		(ConvertSupport(common::FIXED16CHAR, common::INTERVALDAY))
+		(ConvertSupport(common::FIXEDBINARY, common::INT))
+		(ConvertSupport(common::FIXEDBINARY, common::BIGINT))
+		(ConvertSupport(common::FIXEDBINARY, common::DECIMAL9))
+		(ConvertSupport(common::FIXEDBINARY, common::DECIMAL18))
+		(ConvertSupport(common::FIXEDBINARY, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::FIXEDBINARY, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::FIXEDBINARY, common::DATE))
+		(ConvertSupport(common::FIXEDBINARY, common::TIME))
+		(ConvertSupport(common::FIXEDBINARY, common::TIMESTAMP))
+		(ConvertSupport(common::FIXEDBINARY, common::INTERVAL))
+		(ConvertSupport(common::FIXEDBINARY, common::FLOAT4))
+		(ConvertSupport(common::FIXEDBINARY, common::FLOAT8))
+		(ConvertSupport(common::FIXEDBINARY, common::BIT))
+		(ConvertSupport(common::FIXEDBINARY, common::VARCHAR))
+		(ConvertSupport(common::FIXEDBINARY, common::VAR16CHAR))
+		(ConvertSupport(common::FIXEDBINARY, common::VARBINARY))
+		(ConvertSupport(common::FIXEDBINARY, common::INTERVALYEAR))
+		(ConvertSupport(common::FIXEDBINARY, common::INTERVALDAY))
+		(ConvertSupport(common::VARCHAR, common::TINYINT))
+		(ConvertSupport(common::VARCHAR, common::INT))
+		(ConvertSupport(common::VARCHAR, common::BIGINT))
+		(ConvertSupport(common::VARCHAR, common::DECIMAL9))
+		(ConvertSupport(common::VARCHAR, common::DECIMAL18))
+		(ConvertSupport(common::VARCHAR, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::VARCHAR, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::VARCHAR, common::DATE))
+		(ConvertSupport(common::VARCHAR, common::TIME))
+		(ConvertSupport(common::VARCHAR, common::TIMESTAMP))
+		(ConvertSupport(common::VARCHAR, common::INTERVAL))
+		(ConvertSupport(common::VARCHAR, common::FLOAT4))
+		(ConvertSupport(common::VARCHAR, common::FLOAT8))
+		(ConvertSupport(common::VARCHAR, common::BIT))
+		(ConvertSupport(common::VARCHAR, common::VARCHAR))
+		(ConvertSupport(common::VARCHAR, common::VAR16CHAR))
+		(ConvertSupport(common::VARCHAR, common::VARBINARY))
+		(ConvertSupport(common::VARCHAR, common::INTERVALYEAR))
+		(ConvertSupport(common::VARCHAR, common::INTERVALDAY))
+		(ConvertSupport(common::VAR16CHAR, common::TINYINT))
+		(ConvertSupport(common::VAR16CHAR, common::INT))
+		(ConvertSupport(common::VAR16CHAR, common::BIGINT))
+		(ConvertSupport(common::VAR16CHAR, common::DECIMAL9))
+		(ConvertSupport(common::VAR16CHAR, common::DECIMAL18))
+		(ConvertSupport(common::VAR16CHAR, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::VAR16CHAR, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::VAR16CHAR, common::DATE))
+		(ConvertSupport(common::VAR16CHAR, common::TIME))
+		(ConvertSupport(common::VAR16CHAR, common::TIMESTAMP))
+		(ConvertSupport(common::VAR16CHAR, common::INTERVAL))
+		(ConvertSupport(common::VAR16CHAR, common::FLOAT4))
+		(ConvertSupport(common::VAR16CHAR, common::FLOAT8))
+		(ConvertSupport(common::VAR16CHAR, common::BIT))
+		(ConvertSupport(common::VAR16CHAR, common::VARCHAR))
+		(ConvertSupport(common::VAR16CHAR, common::VARBINARY))
+		(ConvertSupport(common::VAR16CHAR, common::INTERVALYEAR))
+		(ConvertSupport(common::VAR16CHAR, common::INTERVALDAY))
+		(ConvertSupport(common::VARBINARY, common::TINYINT))
+		(ConvertSupport(common::VARBINARY, common::INT))
+		(ConvertSupport(common::VARBINARY, common::BIGINT))
+		(ConvertSupport(common::VARBINARY, common::DECIMAL9))
+		(ConvertSupport(common::VARBINARY, common::DECIMAL18))
+		(ConvertSupport(common::VARBINARY, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::VARBINARY, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::VARBINARY, common::DATE))
+		(ConvertSupport(common::VARBINARY, common::TIME))
+		(ConvertSupport(common::VARBINARY, common::TIMESTAMP))
+		(ConvertSupport(common::VARBINARY, common::INTERVAL))
+		(ConvertSupport(common::VARBINARY, common::FLOAT4))
+		(ConvertSupport(common::VARBINARY, common::FLOAT8))
+		(ConvertSupport(common::VARBINARY, common::BIT))
+		(ConvertSupport(common::VARBINARY, common::VARCHAR))
+		(ConvertSupport(common::VARBINARY, common::VAR16CHAR))
+		(ConvertSupport(common::VARBINARY, common::VARBINARY))
+		(ConvertSupport(common::VARBINARY, common::INTERVALYEAR))
+		(ConvertSupport(common::VARBINARY, common::INTERVALDAY))
+		(ConvertSupport(common::UINT1, common::INT))
+		(ConvertSupport(common::UINT1, common::BIGINT))
+		(ConvertSupport(common::UINT1, common::DECIMAL9))
+		(ConvertSupport(common::UINT1, common::DECIMAL18))
+		(ConvertSupport(common::UINT1, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::UINT1, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::UINT1, common::DATE))
+		(ConvertSupport(common::UINT1, common::TIME))
+		(ConvertSupport(common::UINT1, common::TIMESTAMP))
+		(ConvertSupport(common::UINT1, common::INTERVAL))
+		(ConvertSupport(common::UINT1, common::FLOAT4))
+		(ConvertSupport(common::UINT1, common::FLOAT8))
+		(ConvertSupport(common::UINT1, common::BIT))
+		(ConvertSupport(common::UINT1, common::VARCHAR))
+		(ConvertSupport(common::UINT1, common::VAR16CHAR))
+		(ConvertSupport(common::UINT1, common::VARBINARY))
+		(ConvertSupport(common::UINT1, common::INTERVALYEAR))
+		(ConvertSupport(common::UINT1, common::INTERVALDAY))
+		(ConvertSupport(common::UINT2, common::INT))
+		(ConvertSupport(common::UINT2, common::BIGINT))
+		(ConvertSupport(common::UINT2, common::DECIMAL9))
+		(ConvertSupport(common::UINT2, common::DECIMAL18))
+		(ConvertSupport(common::UINT2, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::UINT2, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::UINT2, common::DATE))
+		(ConvertSupport(common::UINT2, common::TIME))
+		(ConvertSupport(common::UINT2, common::TIMESTAMP))
+		(ConvertSupport(common::UINT2, common::INTERVAL))
+		(ConvertSupport(common::UINT2, common::FLOAT4))
+		(ConvertSupport(common::UINT2, common::FLOAT8))
+		(ConvertSupport(common::UINT2, common::BIT))
+		(ConvertSupport(common::UINT2, common::VARCHAR))
+		(ConvertSupport(common::UINT2, common::VAR16CHAR))
+		(ConvertSupport(common::UINT2, common::VARBINARY))
+		(ConvertSupport(common::UINT2, common::INTERVALYEAR))
+		(ConvertSupport(common::UINT2, common::INTERVALDAY))
+		(ConvertSupport(common::UINT4, common::INT))
+		(ConvertSupport(common::UINT4, common::BIGINT))
+		(ConvertSupport(common::UINT4, common::DECIMAL9))
+		(ConvertSupport(common::UINT4, common::DECIMAL18))
+		(ConvertSupport(common::UINT4, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::UINT4, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::UINT4, common::DATE))
+		(ConvertSupport(common::UINT4, common::TIME))
+		(ConvertSupport(common::UINT4, common::TIMESTAMP))
+		(ConvertSupport(common::UINT4, common::INTERVAL))
+		(ConvertSupport(common::UINT4, common::FLOAT4))
+		(ConvertSupport(common::UINT4, common::FLOAT8))
+		(ConvertSupport(common::UINT4, common::BIT))
+		(ConvertSupport(common::UINT4, common::VARCHAR))
+		(ConvertSupport(common::UINT4, common::VAR16CHAR))
+		(ConvertSupport(common::UINT4, common::VARBINARY))
+		(ConvertSupport(common::UINT4, common::INTERVALYEAR))
+		(ConvertSupport(common::UINT4, common::INTERVALDAY))
+		(ConvertSupport(common::UINT8, common::INT))
+		(ConvertSupport(common::UINT8, common::BIGINT))
+		(ConvertSupport(common::UINT8, common::DECIMAL9))
+		(ConvertSupport(common::UINT8, common::DECIMAL18))
+		(ConvertSupport(common::UINT8, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::UINT8, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::UINT8, common::DATE))
+		(ConvertSupport(common::UINT8, common::TIME))
+		(ConvertSupport(common::UINT8, common::TIMESTAMP))
+		(ConvertSupport(common::UINT8, common::INTERVAL))
+		(ConvertSupport(common::UINT8, common::FLOAT4))
+		(ConvertSupport(common::UINT8, common::FLOAT8))
+		(ConvertSupport(common::UINT8, common::BIT))
+		(ConvertSupport(common::UINT8, common::VARCHAR))
+		(ConvertSupport(common::UINT8, common::VAR16CHAR))
+		(ConvertSupport(common::UINT8, common::VARBINARY))
+		(ConvertSupport(common::UINT8, common::INTERVALYEAR))
+		(ConvertSupport(common::UINT8, common::INTERVALDAY))
+		(ConvertSupport(common::DECIMAL28DENSE, common::INT))
+		(ConvertSupport(common::DECIMAL28DENSE, common::BIGINT))
+		(ConvertSupport(common::DECIMAL28DENSE, common::DECIMAL9))
+		(ConvertSupport(common::DECIMAL28DENSE, common::DECIMAL18))
+		(ConvertSupport(common::DECIMAL28DENSE, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DECIMAL28DENSE, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DECIMAL28DENSE, common::DATE))
+		(ConvertSupport(common::DECIMAL28DENSE, common::TIME))
+		(ConvertSupport(common::DECIMAL28DENSE, common::TIMESTAMP))
+		(ConvertSupport(common::DECIMAL28DENSE, common::INTERVAL))
+		(ConvertSupport(common::DECIMAL28DENSE, common::FLOAT4))
+		(ConvertSupport(common::DECIMAL28DENSE, common::FLOAT8))
+		(ConvertSupport(common::DECIMAL28DENSE, common::BIT))
+		(ConvertSupport(common::DECIMAL28DENSE, common::VARCHAR))
+		(ConvertSupport(common::DECIMAL28DENSE, common::VAR16CHAR))
+		(ConvertSupport(common::DECIMAL28DENSE, common::VARBINARY))
+		(ConvertSupport(common::DECIMAL28DENSE, common::INTERVALYEAR))
+		(ConvertSupport(common::DECIMAL28DENSE, common::INTERVALDAY))
+		(ConvertSupport(common::DECIMAL38DENSE, common::INT))
+		(ConvertSupport(common::DECIMAL38DENSE, common::BIGINT))
+		(ConvertSupport(common::DECIMAL38DENSE, common::DECIMAL9))
+		(ConvertSupport(common::DECIMAL38DENSE, common::DECIMAL18))
+		(ConvertSupport(common::DECIMAL38DENSE, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DECIMAL38DENSE, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DECIMAL38DENSE, common::DATE))
+		(ConvertSupport(common::DECIMAL38DENSE, common::TIME))
+		(ConvertSupport(common::DECIMAL38DENSE, common::TIMESTAMP))
+		(ConvertSupport(common::DECIMAL38DENSE, common::INTERVAL))
+		(ConvertSupport(common::DECIMAL38DENSE, common::FLOAT4))
+		(ConvertSupport(common::DECIMAL38DENSE, common::FLOAT8))
+		(ConvertSupport(common::DECIMAL38DENSE, common::BIT))
+		(ConvertSupport(common::DECIMAL38DENSE, common::VARCHAR))
+		(ConvertSupport(common::DECIMAL38DENSE, common::VAR16CHAR))
+		(ConvertSupport(common::DECIMAL38DENSE, common::VARBINARY))
+		(ConvertSupport(common::DECIMAL38DENSE, common::INTERVALYEAR))
+		(ConvertSupport(common::DECIMAL38DENSE, common::INTERVALDAY))
+		(ConvertSupport(common::DM_UNKNOWN, common::TINYINT))
+		(ConvertSupport(common::DM_UNKNOWN, common::INT))
+		(ConvertSupport(common::DM_UNKNOWN, common::BIGINT))
+		(ConvertSupport(common::DM_UNKNOWN, common::DECIMAL9))
+		(ConvertSupport(common::DM_UNKNOWN, common::DECIMAL18))
+		(ConvertSupport(common::DM_UNKNOWN, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::DM_UNKNOWN, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::DM_UNKNOWN, common::DATE))
+		(ConvertSupport(common::DM_UNKNOWN, common::TIME))
+		(ConvertSupport(common::DM_UNKNOWN, common::TIMESTAMP))
+		(ConvertSupport(common::DM_UNKNOWN, common::INTERVAL))
+		(ConvertSupport(common::DM_UNKNOWN, common::FLOAT4))
+		(ConvertSupport(common::DM_UNKNOWN, common::FLOAT8))
+		(ConvertSupport(common::DM_UNKNOWN, common::BIT))
+		(ConvertSupport(common::DM_UNKNOWN, common::VARCHAR))
+		(ConvertSupport(common::DM_UNKNOWN, common::VAR16CHAR))
+		(ConvertSupport(common::DM_UNKNOWN, common::VARBINARY))
+		(ConvertSupport(common::DM_UNKNOWN, common::INTERVALYEAR))
+		(ConvertSupport(common::DM_UNKNOWN, common::INTERVALDAY))
+		(ConvertSupport(common::INTERVALYEAR, common::INT))
+		(ConvertSupport(common::INTERVALYEAR, common::BIGINT))
+		(ConvertSupport(common::INTERVALYEAR, common::DECIMAL9))
+		(ConvertSupport(common::INTERVALYEAR, common::DECIMAL18))
+		(ConvertSupport(common::INTERVALYEAR, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::INTERVALYEAR, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::INTERVALYEAR, common::DATE))
+		(ConvertSupport(common::INTERVALYEAR, common::TIME))
+		(ConvertSupport(common::INTERVALYEAR, common::TIMESTAMP))
+		(ConvertSupport(common::INTERVALYEAR, common::INTERVAL))
+		(ConvertSupport(common::INTERVALYEAR, common::FLOAT4))
+		(ConvertSupport(common::INTERVALYEAR, common::FLOAT8))
+		(ConvertSupport(common::INTERVALYEAR, common::BIT))
+		(ConvertSupport(common::INTERVALYEAR, common::VARCHAR))
+		(ConvertSupport(common::INTERVALYEAR, common::VAR16CHAR))
+		(ConvertSupport(common::INTERVALYEAR, common::VARBINARY))
+		(ConvertSupport(common::INTERVALYEAR, common::INTERVALYEAR))
+		(ConvertSupport(common::INTERVALYEAR, common::INTERVALDAY))
+		(ConvertSupport(common::INTERVALDAY, common::INT))
+		(ConvertSupport(common::INTERVALDAY, common::BIGINT))
+		(ConvertSupport(common::INTERVALDAY, common::DECIMAL9))
+		(ConvertSupport(common::INTERVALDAY, common::DECIMAL18))
+		(ConvertSupport(common::INTERVALDAY, common::DECIMAL28SPARSE))
+		(ConvertSupport(common::INTERVALDAY, common::DECIMAL38SPARSE))
+		(ConvertSupport(common::INTERVALDAY, common::DATE))
+		(ConvertSupport(common::INTERVALDAY, common::TIME))
+		(ConvertSupport(common::INTERVALDAY, common::TIMESTAMP))
+		(ConvertSupport(common::INTERVALDAY, common::INTERVAL))
+		(ConvertSupport(common::INTERVALDAY, common::FLOAT4))
+		(ConvertSupport(common::INTERVALDAY, common::FLOAT8))
+		(ConvertSupport(common::INTERVALDAY, common::BIT))
+		(ConvertSupport(common::INTERVALDAY, common::VARCHAR))
+		(ConvertSupport(common::INTERVALDAY, common::VAR16CHAR))
+		(ConvertSupport(common::INTERVALDAY, common::VARBINARY))
+		(ConvertSupport(common::INTERVALDAY, common::INTERVALYEAR))
+		(ConvertSupport(common::INTERVALDAY, common::INTERVALDAY));
+
+static exec::user::ServerMeta createDefaultServerMeta() {
+	exec::user::ServerMeta result;
+
+	result.set_all_tables_selectable(false);
+	result.set_blob_included_in_max_row_size(true);
+	result.set_catalog_at_start(true);
+	result.set_catalog_separator(s_catalogSeparator);
+	result.set_catalog_term(s_catalogTerm);
+	result.set_column_aliasing_supported(true);
+	std::copy(s_convertMap.begin(), s_convertMap.end(),
+	          google::protobuf::RepeatedFieldBackInserter(result.mutable_convert_support()));
+	result.set_correlation_names_support(exec::user::CN_ANY);
+	std::copy(s_dateTimeFunctions.begin(), s_dateTimeFunctions.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_date_time_functions()));
+	std::copy(s_dateTimeLiterals.begin(), s_dateTimeLiterals.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_date_time_literals_support()));
+	result.set_group_by_support(exec::user::GB_UNRELATED);
+	result.set_identifier_casing(exec::user::IC_STORES_MIXED);
+	result.set_identifier_quote_string(s_identifierQuoteString);
+	result.set_like_escape_clause_supported(true);
+	result.set_max_catalog_name_length(s_maxIdentifierSize);
+	result.set_max_column_name_length(s_maxIdentifierSize);
+	result.set_max_cursor_name_length(s_maxIdentifierSize);
+	result.set_max_schema_name_length(s_maxIdentifierSize);
+	result.set_max_table_name_length(s_maxIdentifierSize);
+	result.set_max_user_name_length(s_maxIdentifierSize);
+	result.set_null_collation(exec::user::NC_AT_END);
+	result.set_null_plus_non_null_equals_null(true);
+	std::copy(s_numericFunctions.begin(), s_numericFunctions.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_numeric_functions()));
+	std::copy(s_orderBySupport.begin(), s_orderBySupport.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_order_by_support()));
+	std::copy(s_outerJoinSupport.begin(), s_outerJoinSupport.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_outer_join_support()));
+	result.set_quoted_identifier_casing(exec::user::IC_STORES_MIXED);
+	result.set_read_only(false);
+	result.set_schema_term(s_schemaTerm);
+	result.set_search_escape_string(s_searchEscapeString);
+	result.set_special_characters(s_specialCharacters);
+	std::copy(s_sqlKeywords.begin(), s_sqlKeywords.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_sql_keywords()));
+	std::copy(s_stringFunctions.begin(), s_stringFunctions.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_string_functions()));
+	std::copy(s_subQuerySupport.begin(), s_subQuerySupport.end(),
+			google::protobuf::RepeatedFieldBackInserter(result.mutable_subquery_support()));
+	std::copy(s_systemFunctions.begin(), s_systemFunctions.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_system_functions()));
+	result.set_table_term(s_tableTerm);
+	std::copy(s_unionSupport.begin(), s_unionSupport.end(),
+			  google::protobuf::RepeatedFieldBackInserter(result.mutable_union_support()));
+
+	return result;
+}
+
+static Drill::meta::CollateSupport collateSupport(const google::protobuf::RepeatedField<google::protobuf::int32>& collateSupportList) {
+	Drill::meta::CollateSupport result(Drill::meta::C_NONE);
+
+	for(google::protobuf::RepeatedField<google::protobuf::int32>::const_iterator it = collateSupportList.begin();
+		it != collateSupportList.end();
+		++it) {
+		switch(static_cast<exec::user::CollateSupport>(*it)) {
+		case exec::user::CS_GROUP_BY:
+			result |= Drill::meta::C_GROUPBY;
+			break;
+
+		// ignore unknown
+		case exec::user::CS_UNKNOWN:
+		default:
+			break;
+		}
+	}
+	return result;
+}
+
+static Drill::meta::CorrelationNamesSupport correlationNames(exec::user::CorrelationNamesSupport correlationNamesSupport) {
+	switch(correlationNamesSupport) {
+	case exec::user::CN_DIFFERENT_NAMES:
+		return Drill::meta::CN_DIFFERENT_NAMES;
+
+	case exec::user::CN_ANY:
+		return Drill::meta::CN_ANY_NAMES;
+
+	case exec::user::CN_NONE:
+	default:
+		// unknown value
+		return Drill::meta::CN_NONE;
+	}
+}
+
+static Drill::meta::DateTimeLiteralSupport dateTimeLiteralsSupport(const google::protobuf::RepeatedField<google::protobuf::int32>& dateTimeLiteralsSupportList) {
+	Drill::meta::DateTimeLiteralSupport result(Drill::meta::DL_NONE);
+
+	for(google::protobuf::RepeatedField<google::protobuf::int32>::const_iterator it = dateTimeLiteralsSupportList.begin();
+			it != dateTimeLiteralsSupportList.end();
+			++it) {
+			switch(static_cast<exec::user::DateTimeLiteralsSupport>(*it)) {
+			case exec::user::DL_DATE:
+				result |= Drill::meta::DL_DATE;
+				break;
+
+			case exec::user::DL_TIME:
+				result |= Drill::meta::DL_TIME;
+				break;
+
+			case exec::user::DL_TIMESTAMP:
+				result |= Drill::meta::DL_TIMESTAMP;
+				break;
+
+			case exec::user::DL_INTERVAL_YEAR:
+				result |= Drill::meta::DL_INTERVAL_YEAR;
+				break;
+
+			case exec::user::DL_INTERVAL_YEAR_TO_MONTH:
+				result |= Drill::meta::DL_INTERVAL_YEAR_TO_MONTH;
+				break;
+
+			case exec::user::DL_INTERVAL_MONTH:
+				result |= Drill::meta::DL_INTERVAL_MONTH;
+				break;
+
+			case exec::user::DL_INTERVAL_DAY:
+				result |= Drill::meta::DL_INTERVAL_DAY;
+				break;
+
+			case exec::user::DL_INTERVAL_DAY_TO_HOUR:
+				result |= Drill::meta::DL_INTERVAL_DAY_TO_HOUR;
+				break;
+
+			case exec::user::DL_INTERVAL_DAY_TO_MINUTE:
+				result |= Drill::meta::DL_INTERVAL_DAY_TO_MINUTE;
+				break;
+
+			case exec::user::DL_INTERVAL_DAY_TO_SECOND:
+				result |= Drill::meta::DL_INTERVAL_DAY_TO_SECOND;
+				break;
+
+			case exec::user::DL_INTERVAL_HOUR:
+				result |= Drill::meta::DL_INTERVAL_HOUR;
+				break;
+
+			case exec::user::DL_INTERVAL_HOUR_TO_MINUTE:
+				result |= Drill::meta::DL_INTERVAL_HOUR_TO_MINUTE;
+				break;
+
+			case exec::user::DL_INTERVAL_HOUR_TO_SECOND:
+				result |= Drill::meta::DL_INTERVAL_HOUR_TO_SECOND;
+				break;
+
+			case exec::user::DL_INTERVAL_MINUTE:
+				result |= Drill::meta::DL_INTERVAL_MINUTE;
+				break;
+
+			case exec::user::DL_INTERVAL_MINUTE_TO_SECOND:
+				result |= Drill::meta::DL_INTERVAL_MINUTE_TO_SECOND;
+				break;
+
+			case exec::user::DL_INTERVAL_SECOND:
+				result |= Drill::meta::DL_INTERVAL_SECOND;
+				break;
+
+			// ignore unknown
+			case exec::user::DL_UNKNOWN:
+			default:
+				break;
+			}
+		}
+
+	return result;
+}
+
+static Drill::meta::GroupBySupport groupBySupport(exec::user::GroupBySupport groupBySupport) {
+	switch(groupBySupport) {
+	case exec::user::GB_SELECT_ONLY:
+		return Drill::meta::GB_SELECT_ONLY;
 
-struct FromTo {
-	FromTo(common::MinorType from, common::MinorType to): m_from(from), m_to(to) {}
+	case exec::user::GB_BEYOND_SELECT:
+		return Drill::meta::GB_BEYOND_SELECT;
 
-	common::MinorType m_from;
-	common::MinorType m_to;
-};
+	case exec::user::GB_NONE:
+	default:
+		// unknown value
+		return Drill::meta::GB_NONE;
+	}
+}
+
+static Drill::meta::IdentifierCase identifierCase(exec::user::IdentifierCasing identifierCasing) {
+	switch(identifierCasing) {
+	case exec::user::IC_STORES_LOWER:
+		return Drill::meta::IC_STORES_LOWER;
+
+	case exec::user::IC_STORES_MIXED:
+		return Drill::meta::IC_STORES_MIXED;
+
+	case exec::user::IC_STORES_UPPER:
+		return Drill::meta::IC_STORES_UPPER;
+
+	case exec::user::IC_SUPPORTS_MIXED:
+		return Drill::meta::IC_SUPPORTS_MIXED;
+
+	case exec::user::IC_UNKNOWN:
+	default:
+		// unknown value
+		return Drill::meta::IC_UNKNOWN;
+	}
+}
+
+static Drill::meta::NullCollation nullCollation(exec::user::NullCollation nullCollation) {
+	switch(nullCollation) {
+	case exec::user::NC_AT_END:
+		return Drill::meta::NC_AT_END;
+
+	case exec::user::NC_AT_START:
+		return Drill::meta::NC_AT_START;
+
+	case exec::user::NC_HIGH:
+		return Drill::meta::NC_HIGH;
+
+	case exec::user::NC_LOW:
+		return Drill::meta::NC_LOW;
+
+	case exec::user::NC_UNKNOWN:
+	default:
+		// unknown value
+		return Drill::meta::NC_UNKNOWN;
+	}
+}
+
+static Drill::meta::OuterJoinSupport outerJoinSupport(const google::protobuf::RepeatedField<google::protobuf::int32>& outerJoinSupportList) {
+	Drill::meta::OuterJoinSupport result(Drill::meta::OJ_NONE);
+
+	for(google::protobuf::RepeatedField<google::protobuf::int32>::const_iterator it = outerJoinSupportList.begin();
+			it != outerJoinSupportList.end();
+			++it) {
+			switch(static_cast<exec::user::OuterJoinSupport>(*it)) {
+			case exec::user::OJ_LEFT:
+				result |= Drill::meta::OJ_LEFT;
+				break;
+
+			case exec::user::OJ_RIGHT:
+				result |= Drill::meta::OJ_RIGHT;
+				break;
+
+			case exec::user::OJ_FULL:
+				result |= Drill::meta::OJ_FULL;
+				break;
+
+			case exec::user::OJ_NESTED:
+				result |= Drill::meta::OJ_NESTED;
+				break;
+
+			case exec::user::OJ_INNER:
+				result |= Drill::meta::OJ_INNER;
+				break;
+
+			case exec::user::OJ_NOT_ORDERED:
+				result |= Drill::meta::OJ_NOT_ORDERED;
+				break;
+
+			case exec::user::OJ_ALL_COMPARISON_OPS:
+				result |= Drill::meta::OJ_ALL_COMPARISON_OPS;
+				break;
+
+			// ignore unknown
+			case exec::user::OJ_UNKNOWN:
+			default:
+				break;
+			}
+		}
 
-bool operator==(FromTo const& ft1, FromTo const& ft2) {
-	return ft1.m_from == ft2.m_from && ft1.m_to == ft2.m_to;
+	return result;
 }
 
-std::size_t hash_value(FromTo const& ft) {
-	std::size_t hash = 0;
-	boost::hash_combine(hash, ft.m_from);
-	boost::hash_combine(hash, ft.m_to);
+static Drill::meta::QuotedIdentifierCase quotedIdentifierCase(exec::user::IdentifierCasing identifierCasing) {
+	switch(identifierCasing) {
+	case exec::user::IC_STORES_LOWER:
+		return Drill::meta::QIC_STORES_LOWER;
 
-	return hash;
+	case exec::user::IC_STORES_MIXED:
+		return Drill::meta::QIC_STORES_MIXED;
+
+	case exec::user::IC_STORES_UPPER:
+		return Drill::meta::QIC_STORES_UPPER;
+
+	case exec::user::IC_SUPPORTS_MIXED:
+		return Drill::meta::QIC_SUPPORTS_MIXED;
+
+	case exec::user::IC_UNKNOWN:
+	default:
+		// unknown value
+		return Drill::meta::QIC_UNKNOWN;
+	}
+}
+
+static Drill::meta::SubQuerySupport subQuerySupport(const google::protobuf::RepeatedField<google::protobuf::int32>& subQuerySupportList) {
+	Drill::meta::SubQuerySupport result(Drill::meta::SQ_NONE);
+
+	for(google::protobuf::RepeatedField<google::protobuf::int32>::const_iterator it = subQuerySupportList.begin();
+			it != subQuerySupportList.end();
+			++it) {
+			switch(static_cast<exec::user::SubQuerySupport>(*it)) {
+			case exec::user::SQ_CORRELATED:
+				result |= Drill::meta::SQ_CORRELATED;
+				break;
+
+			case exec::user::SQ_IN_COMPARISON:
+				result |= Drill::meta::SQ_IN_COMPARISON;
+				break;
+
+			case exec::user::SQ_IN_EXISTS:
+				result |= Drill::meta::SQ_IN_EXISTS;
+				break;
+
+			case exec::user::SQ_IN_INSERT:
+				result |= Drill::meta::SQ_IN_INSERT;
+				break;
+
+			case exec::user::SQ_IN_QUANTIFIED:
+				result |= Drill::meta::SQ_IN_QUANTIFIED;
+				break;
+
+			// ignore unknown
+			case exec::user::SQ_UNKNOWN:
+			default:
+				break;
+			}
+		}
+
+	return result;
 }
 
-static boost::unordered_set<FromTo> s_convertMap = boost::assign::list_of
-		(FromTo(common::TINYINT, common::INT))
-		(FromTo(common::TINYINT, common::BIGINT))
-		(FromTo(common::TINYINT, common::DECIMAL9))
-		(FromTo(common::TINYINT, common::DECIMAL18))
-		(FromTo(common::TINYINT, common::DECIMAL28SPARSE))
-		(FromTo(common::TINYINT, common::DECIMAL38SPARSE))
-		(FromTo(common::TINYINT, common::DATE))
-		(FromTo(common::TINYINT, common::TIME))
-		(FromTo(common::TINYINT, common::TIMESTAMP))
-		(FromTo(common::TINYINT, common::INTERVAL))
-		(FromTo(common::TINYINT, common::FLOAT4))
-		(FromTo(common::TINYINT, common::FLOAT8))
-		(FromTo(common::TINYINT, common::BIT))
-		(FromTo(common::TINYINT, common::VARCHAR))
-		(FromTo(common::TINYINT, common::VAR16CHAR))
-		(FromTo(common::TINYINT, common::VARBINARY))
-		(FromTo(common::TINYINT, common::INTERVALYEAR))
-		(FromTo(common::TINYINT, common::INTERVALDAY))
-		(FromTo(common::SMALLINT, common::INT))
-		(FromTo(common::SMALLINT, common::BIGINT))
-		(FromTo(common::SMALLINT, common::DECIMAL9))
-		(FromTo(common::SMALLINT, common::DECIMAL18))
-		(FromTo(common::SMALLINT, common::DECIMAL28SPARSE))
-		(FromTo(common::SMALLINT, common::DECIMAL38SPARSE))
-		(FromTo(common::SMALLINT, common::DATE))
-		(FromTo(common::SMALLINT, common::TIME))
-		(FromTo(common::SMALLINT, common::TIMESTAMP))
-		(FromTo(common::SMALLINT, common::INTERVAL))
-		(FromTo(common::SMALLINT, common::FLOAT4))
-		(FromTo(common::SMALLINT, common::FLOAT8))
-		(FromTo(common::SMALLINT, common::BIT))
-		(FromTo(common::SMALLINT, common::VARCHAR))
-		(FromTo(common::SMALLINT, common::VAR16CHAR))
-		(FromTo(common::SMALLINT, common::VARBINARY))
-		(FromTo(common::SMALLINT, common::INTERVALYEAR))
-		(FromTo(common::SMALLINT, common::INTERVALDAY))
-		(FromTo(common::INT, common::INT))
-		(FromTo(common::INT, common::BIGINT))
-		(FromTo(common::INT, common::DECIMAL9))
-		(FromTo(common::INT, common::DECIMAL18))
-		(FromTo(common::INT, common::DECIMAL28SPARSE))
-		(FromTo(common::INT, common::DECIMAL38SPARSE))
-		(FromTo(common::INT, common::DATE))
-		(FromTo(common::INT, common::TIME))
-		(FromTo(common::INT, common::TIMESTAMP))
-		(FromTo(common::INT, common::INTERVAL))
-		(FromTo(common::INT, common::FLOAT4))
-		(FromTo(common::INT, common::FLOAT8))
-		(FromTo(common::INT, common::BIT))
-		(FromTo(common::INT, common::VARCHAR))
-		(FromTo(common::INT, common::VAR16CHAR))
-		(FromTo(common::INT, common::VARBINARY))
-		(FromTo(common::INT, common::INTERVALYEAR))
-		(FromTo(common::INT, common::INTERVALDAY))
-		(FromTo(common::BIGINT, common::INT))
-		(FromTo(common::BIGINT, common::BIGINT))
-		(FromTo(common::BIGINT, common::DECIMAL9))
-		(FromTo(common::BIGINT, common::DECIMAL18))
-		(FromTo(common::BIGINT, common::DECIMAL28SPARSE))
-		(FromTo(common::BIGINT, common::DECIMAL38SPARSE))
-		(FromTo(common::BIGINT, common::DATE))
-		(FromTo(common::BIGINT, common::TIME))
-		(FromTo(common::BIGINT, common::TIMESTAMP))
-		(FromTo(common::BIGINT, common::INTERVAL))
-		(FromTo(common::BIGINT, common::FLOAT4))
-		(FromTo(common::BIGINT, common::FLOAT8))
-		(FromTo(common::BIGINT, common::BIT))
-		(FromTo(common::BIGINT, common::VARCHAR))
-		(FromTo(common::BIGINT, common::VAR16CHAR))
-		(FromTo(common::BIGINT, common::VARBINARY))
-		(FromTo(common::BIGINT, common::INTERVALYEAR))
-		(FromTo(common::BIGINT, common::INTERVALDAY))
-		(FromTo(common::DECIMAL9, common::INT))
-		(FromTo(common::DECIMAL9, common::BIGINT))
-		(FromTo(common::DECIMAL9, common::DECIMAL9))
-		(FromTo(common::DECIMAL9, common::DECIMAL18))
-		(FromTo(common::DECIMAL9, common::DECIMAL28SPARSE))
-		(FromTo(common::DECIMAL9, common::DECIMAL38SPARSE))
-		(FromTo(common::DECIMAL9, common::DATE))
-		(FromTo(common::DECIMAL9, common::TIME))
-		(FromTo(common::DECIMAL9, common::TIMESTAMP))
-		(FromTo(common::DECIMAL9, common::INTERVAL))
-		(FromTo(common::DECIMAL9, common::FLOAT4))
-		(FromTo(common::DECIMAL9, common::FLOAT8))
-		(FromTo(common::DECIMAL9, common::BIT))
-		(FromTo(common::DECIMAL9, common::VARCHAR))
-		(FromTo(common::DECIMAL9, common::VAR16CHAR))
-		(FromTo(common::DECIMAL9, common::VARBINARY))
-		(FromTo(common::DECIMAL9, common::INTERVALYEAR))
-		(FromTo(common::DECIMAL9, common::INTERVALDAY))
-		(FromTo(common::DECIMAL18, common::INT))
-		(FromTo(common::DECIMAL18, common::BIGINT))
-		(FromTo(common::DECIMAL18, common::DECIMAL9))
-		(FromTo(common::DECIMAL18, common::DECIMAL18))
-		(FromTo(common::DECIMAL18, common::DECIMAL28SPARSE))
-		(FromTo(common::DECIMAL18, common::DECIMAL38SPARSE))
-		(FromTo(common::DECIMAL18, common::DATE))
-		(FromTo(common::DECIMAL18, common::TIME))
-		(FromTo(common::DECIMAL18, common::TIMESTAMP))
-		(FromTo(common::DECIMAL18, common::INTERVAL))
-		(FromTo(common::DECIMAL18, common::FLOAT4))
-		(FromTo(common::DECIMAL18, common::FLOAT8))
-		(FromTo(common::DECIMAL18, common::BIT))
-		(FromTo(common::DECIMAL18, common::VARCHAR))
-		(FromTo(common::DECIMAL18, common::VAR16CHAR))
-		(FromTo(common::DECIMAL18, common::VARBINARY))
-		(FromTo(common::DECIMAL18, common::INTERVALYEAR))
-		(FromTo(common::DECIMAL18, common::INTERVALDAY))
-		(FromTo(common::DECIMAL28SPARSE, common::INT))
-		(FromTo(common::DECIMAL28SPARSE, common::BIGINT))
-		(FromTo(common::DECIMAL28SPARSE, common::DECIMAL9))
-		(FromTo(common::DECIMAL28SPARSE, common::DECIMAL18))
-		(FromTo(common::DECIMAL28SPARSE, common::DECIMAL28SPARSE))
-		(FromTo(common::DECIMAL28SPARSE, common::DECIMAL38SPARSE))
-		(FromTo(common::DECIMAL28SPARSE, common::DATE))
-		(FromTo(common::DECIMAL28SPARSE, common::TIME))
-		(FromTo(common::DECIMAL28SPARSE, common::TIMESTAMP))
-		(FromTo(common::DECIMAL28SPARSE, common::INTERVAL))
-		(FromTo(common::DECIMAL28SPARSE, common::FLOAT4))
-		(FromTo(common::DECIMAL28SPARSE, common::FLOAT8))
-		(FromTo(common::DECIMAL28SPARSE, common::BIT))
-		(FromTo(common::DECIMAL28SPARSE, common::VARCHAR))
-		(FromTo(common::DECIMAL28SPARSE, common::VAR16CHAR))
-		(FromTo(common::DECIMAL28SPARSE, common::VARBINARY))
-		(FromTo(common::DECIMAL28SPARSE, common::INTERVALYEAR))
-		(FromTo(common::DECIMAL28SPARSE, common::INTERVALDAY))
-		(FromTo(common::DECIMAL38SPARSE, common::INT))
-		(FromTo(common::DECIMAL38SPARSE, common::BIGINT))
-		(FromTo(common::DECIMAL38SPARSE, common::DECIMAL9))
-		(FromTo(common::DECIMAL38SPARSE, common::DECIMAL18))
-		(FromTo(common::DECIMAL38SPARSE, common::DECIMAL28SPARSE))
-		(FromTo(common::DECIMAL38SPARSE, common::DECIMAL38SPARSE))
-		(FromTo(common::DECIMAL38SPARSE, common::DATE))
-		(FromTo(common::DECIMAL38SPARSE, common::TIME))
-		(FromTo(common::DECIMAL38SPARSE, common::TIMESTAMP))
-		(FromTo(common::DECIMAL38SPARSE, common::INTERVAL))
-		(FromTo(common::DECIMAL38SPARSE, common::FLOAT4))
-		(FromTo(common::DECIMAL38SPARSE, common::FLOAT8))
-		(FromTo(common::DECIMAL38SPARSE, common::BIT))
-		(FromTo(common::DECIMAL38SPARSE, common::VARCHAR))
-		(FromTo(common::DECIMAL38SPARSE, common::VAR16CHAR))
-		(FromTo(common::DECIMAL38SPARSE, common::VARBINARY))
-		(FromTo(common::DECIMAL38SPARSE, common::INTERVALYEAR))
-		(FromTo(common::DECIMAL38SPARSE, common::INTERVALDAY))
-		(FromTo(common::MONEY, common::INT))
-		(FromTo(common::MONEY, common::BIGINT))
-		(FromTo(common::MONEY, common::DECIMAL9))
-		(FromTo(common::MONEY, common::DECIMAL18))
-		(FromTo(common::MONEY, common::DECIMAL28SPARSE))
-		(FromTo(common::MONEY, common::DECIMAL38SPARSE))
-		(FromTo(common::MONEY, common::DATE))
-		(FromTo(common::MONEY, common::TIME))
-		(FromTo(common::MONEY, common::TIMESTAMP))
-		(FromTo(common::MONEY, common::INTERVAL))
-		(FromTo(common::MONEY, common::FLOAT4))
-		(FromTo(common::MONEY, common::FLOAT8))
-		(FromTo(common::MONEY, common::BIT))
-		(FromTo(common::MONEY, common::VARCHAR))
-		(FromTo(common::MONEY, common::VAR16CHAR))
-		(FromTo(common::MONEY, common::VARBINARY))
-		(FromTo(common::MONEY, common::INTERVALYEAR))
-		(FromTo(common::MONEY, common::INTERVALDAY))
-		(FromTo(common::DATE, common::INT))
-		(FromTo(common::DATE, common::BIGINT))
-		(FromTo(common::DATE, common::DECIMAL9))
-		(FromTo(common::DATE, common::DECIMAL18))
-		(FromTo(common::DATE, common::DECIMAL28SPARSE))
-		(FromTo(common::DATE, common::DECIMAL38SPARSE))
-		(FromTo(common::DATE, common::DATE))
-		(FromTo(common::DATE, common::TIME))
-		(FromTo(common::DATE, common::TIMESTAMP))
-		(FromTo(common::DATE, common::INTERVAL))
-		(FromTo(common::DATE, common::FLOAT4))
-		(FromTo(common::DATE, common::FLOAT8))
-		(FromTo(common::DATE, common::BIT))
-		(FromTo(common::DATE, common::VARCHAR))
-		(FromTo(common::DATE, common::VAR16CHAR))
-		(FromTo(common::DATE, common::VARBINARY))
-		(FromTo(common::DATE, common::INTERVALYEAR))
-		(FromTo(common::DATE, common::INTERVALDAY))
-		(FromTo(common::TIME, common::INT))
-		(FromTo(common::TIME, common::BIGINT))
-		(FromTo(common::TIME, common::DECIMAL9))
-		(FromTo(common::TIME, common::DECIMAL18))
-		(FromTo(common::TIME, common::DECIMAL28SPARSE))
-		(FromTo(common::TIME, common::DECIMAL38SPARSE))
-		(FromTo(common::TIME, common::DATE))
-		(FromTo(common::TIME, common::TIME))
-		(FromTo(common::TIME, common::TIMESTAMP))
-		(FromTo(common::TIME, common::INTERVAL))
-		(FromTo(common::TIME, common::FLOAT4))
-		(FromTo(common::TIME, common::FLOAT8))
-		(FromTo(common::TIME, common::BIT))
-		(FromTo(common::TIME, common::VARCHAR))
-		(FromTo(common::TIME, common::VAR16CHAR))
-		(FromTo(common::TIME, common::VARBINARY))
-		(FromTo(common::TIME, common::INTERVALYEAR))
-		(FromTo(common::TIME, common::INTERVALDAY))
-		(FromTo(common::TIMESTAMPTZ, common::INT))
-		(FromTo(common::TIMESTAMPTZ, common::BIGINT))
-		(FromTo(common::TIMESTAMPTZ, common::DECIMAL9))
-		(FromTo(common::TIMESTAMPTZ, common::DECIMAL18))
-		(FromTo(common::TIMESTAMPTZ, common::DECIMAL28SPARSE))
-		(FromTo(common::TIMESTAMPTZ, common::DECIMAL38SPARSE))
-		(FromTo(common::TIMESTAMPTZ, common::DATE))
-		(FromTo(common::TIMESTAMPTZ, common::TIME))
-		(FromTo(common::TIMESTAMPTZ, common::TIMESTAMP))
-		(FromTo(common::TIMESTAMPTZ, common::INTERVAL))
-		(FromTo(common::TIMESTAMPTZ, common::FLOAT4))
-		(FromTo(common::TIMESTAMPTZ, common::FLOAT8))
-		(FromTo(common::TIMESTAMPTZ, common::BIT))
-		(FromTo(common::TIMESTAMPTZ, common::VARCHAR))
-		(FromTo(common::TIMESTAMPTZ, common::VAR16CHAR))
-		(FromTo(common::TIMESTAMPTZ, common::VARBINARY))
-		(FromTo(common::TIMESTAMPTZ, common::INTERVALYEAR))
-		(FromTo(common::TIMESTAMPTZ, common::INTERVALDAY))
-		(FromTo(common::TIMESTAMP, common::INT))
-		(FromTo(common::TIMESTAMP, common::BIGINT))
-		(FromTo(common::TIMESTAMP, common::DECIMAL9))
-		(FromTo(common::TIMESTAMP, common::DECIMAL18))
-		(FromTo(common::TIMESTAMP, common::DECIMAL28SPARSE))
-		(FromTo(common::TIMESTAMP, common::DECIMAL38SPARSE))
-		(FromTo(common::TIMESTAMP, common::DATE))
-		(FromTo(common::TIMESTAMP, common::TIME))
-		(FromTo(common::TIMESTAMP, common::TIMESTAMP))
-		(FromTo(common::TIMESTAMP, common::INTERVAL))
-		(FromTo(common::TIMESTAMP, common::FLOAT4))
-		(FromTo(common::TIMESTAMP, common::FLOAT8))
-		(FromTo(common::TIMESTAMP, common::BIT))
-		(FromTo(common::TIMESTAMP, common::VARCHAR))
-		(FromTo(common::TIMESTAMP, common::VAR16CHAR))
-		(FromTo(common::TIMESTAMP, common::VARBINARY))
-		(FromTo(common::TIMESTAMP, common::INTERVALYEAR))
-		(FromTo(common::TIMESTAMP, common::INTERVALDAY))
-		(FromTo(common::INTERVAL, common::INT))
-		(FromTo(common::INTERVAL, common::BIGINT))
-		(FromTo(common::INTERVAL, common::DECIMAL9))
-		(FromTo(common::INTERVAL, common::DECIMAL18))
-		(FromTo(common::INTERVAL, common::DECIMAL28SPARSE))
-		(FromTo(common::INTERVAL, common::DECIMAL38SPARSE))
-		(FromTo(common::INTERVAL, common::DATE))
-		(FromTo(common::INTERVAL, common::TIME))
-		(FromTo(common::INTERVAL, common::TIMESTAMP))
-		(FromTo(common::INTERVAL, common::INTERVAL))
-		(FromTo(common::INTERVAL, common::FLOAT4))
-		(FromTo(common::INTERVAL, common::FLOAT8))
-		(FromTo(common::INTERVAL, common::BIT))
-		(FromTo(common::INTERVAL, common::VARCHAR))
-		(FromTo(common::INTERVAL, common::VAR16CHAR))
-		(FromTo(common::INTERVAL, common::VARBINARY))
-		(FromTo(common::INTERVAL, common::INTERVALYEAR))
-		(FromTo(common::INTERVAL, common::INTERVALDAY))
-		(FromTo(common::FLOAT4, common::INT))
-		(FromTo(common::FLOAT4, common::BIGINT))
-		(FromTo(common::FLOAT4, common::DECIMAL9))
-		(FromTo(common::FLOAT4, common::DECIMAL18))
-		(FromTo(common::FLOAT4, common::DECIMAL28SPARSE))
-		(FromTo(common::FLOAT4, common::DECIMAL38SPARSE))
-		(FromTo(common::FLOAT4, common::DATE))
-		(FromTo(common::FLOAT4, common::TIME))
-		(FromTo(common::FLOAT4, common::TIMESTAMP))
-		(FromTo(common::FLOAT4, common::INTERVAL))
-		(FromTo(common::FLOAT4, common::FLOAT4))
-		(FromTo(common::FLOAT4, common::FLOAT8))
-		(FromTo(common::FLOAT4, common::BIT))
-		(FromTo(common::FLOAT4, common::VARCHAR))
-		(FromTo(common::FLOAT4, common::VAR16CHAR))
-		(FromTo(common::FLOAT4, common::VARBINARY))
-		(FromTo(common::FLOAT4, common::INTERVALYEAR))
-		(FromTo(common::FLOAT4, common::INTERVALDAY))
-		(FromTo(common::FLOAT8, common::INT))
-		(FromTo(common::FLOAT8, common::BIGINT))
-		(FromTo(common::FLOAT8, common::DECIMAL9))
-		(FromTo(common::FLOAT8, common::DECIMAL18))
-		(FromTo(common::FLOAT8, common::DECIMAL28SPARSE))
-		(FromTo(common::FLOAT8, common::DECIMAL38SPARSE))
-		(FromTo(common::FLOAT8, common::DATE))
-		(FromTo(common::FLOAT8, common::TIME))
-		(FromTo(common::FLOAT8, common::TIMESTAMP))
-		(FromTo(common::FLOAT8, common::INTERVAL))
-		(FromTo(common::FLOAT8, common::FLOAT4))
-		(FromTo(common::FLOAT8, common::FLOAT8))
-		(FromTo(common::FLOAT8, common::BIT))
-		(FromTo(common::FLOAT8, common::VARCHAR))
-		(FromTo(common::FLOAT8, common::VAR16CHAR))
-		(FromTo(common::FLOAT8, common::VARBINARY))
-		(FromTo(common::FLOAT8, common::INTERVALYEAR))
-		(FromTo(common::FLOAT8, common::INTERVALDAY))
-		(FromTo(common::BIT, common::TINYINT))
-		(FromTo(common::BIT, common::INT))
-		(FromTo(common::BIT, common::BIGINT))
-		(FromTo(common::BIT, common::DECIMAL9))
-		(FromTo(common::BIT, common::DECIMAL18))
-		(FromTo(common::BIT, common::DECIMAL28SPARSE))
-		(FromTo(common::BIT, common::DECIMAL38SPARSE))
-		(FromTo(common::BIT, common::DATE))
-		(FromTo(common::BIT, common::TIME))
-		(FromTo(common::BIT, common::TIMESTAMP))
-		(FromTo(common::BIT, common::INTERVAL))
-		(FromTo(common::BIT, common::FLOAT4))
-		(FromTo(common::BIT, common::FLOAT8))
-		(FromTo(common::BIT, common::BIT))
-		(FromTo(common::BIT, common::VARCHAR))
-		(FromTo(common::BIT, common::VAR16CHAR))
-		(FromTo(common::BIT, common::VARBINARY))
-		(FromTo(common::BIT, common::INTERVALYEAR))
-		(FromTo(common::BIT, common::INTERVALDAY))
-		(FromTo(common::FIXEDCHAR, common::TINYINT))
-		(FromTo(common::FIXEDCHAR, common::INT))
-		(FromTo(common::FIXEDCHAR, common::BIGINT))
-		(FromTo(common::FIXEDCHAR, common::DECIMAL9))
-		(FromTo(common::FIXEDCHAR, common::DECIMAL18))
-		(FromTo(common::FIXEDCHAR, common::DECIMAL28SPARSE))
-		(FromTo(common::FIXEDCHAR, common::DECIMAL38SPARSE))
-		(FromTo(common::FIXEDCHAR, common::DATE))
-		(FromTo(common::FIXEDCHAR, common::TIME))
-		(FromTo(common::FIXEDCHAR, common::TIMESTAMP))
-		(FromTo(common::FIXEDCHAR, common::INTERVAL))
-		(FromTo(common::FIXEDCHAR, common::FLOAT4))
-		(FromTo(common::FIXEDCHAR, common::FLOAT8))
-		(FromTo(common::FIXEDCHAR, common::BIT))
-		(FromTo(common::FIXEDCHAR, common::VARCHAR))
-		(FromTo(common::FIXEDCHAR, common::VAR16CHAR))
-		(FromTo(common::FIXEDCHAR, common::VARBINARY))
-		(FromTo(common::FIXEDCHAR, common::INTERVALYEAR))
-		(FromTo(common::FIXEDCHAR, common::INTERVALDAY))
-		(FromTo(common::FIXED16CHAR, common::TINYINT))
-		(FromTo(common::FIXED16CHAR, common::INT))
-		(FromTo(common::FIXED16CHAR, common::BIGINT))
-		(FromTo(common::FIXED16CHAR, common::DECIMAL9))
-		(FromTo(common::FIXED16CHAR, common::DECIMAL18))
-		(FromTo(common::FIXED16CHAR, common::DECIMAL28SPARSE))
-		(FromTo(common::FIXED16CHAR, common::DECIMAL38SPARSE))
-		(FromTo(common::FIXED16CHAR, common::DATE))
-		(FromTo(common::FIXED16CHAR, common::TIME))
-		(FromTo(common::FIXED16CHAR, common::TIMESTAMP))
-		(FromTo(common::FIXED16CHAR, common::INTERVAL))
-		(FromTo(common::FIXED16CHAR, common::FLOAT4))
-		(FromTo(common::FIXED16CHAR, common::FLOAT8))
-		(FromTo(common::FIXED16CHAR, common::BIT))
-		(FromTo(common::FIXED16CHAR, common::VARCHAR))
-		(FromTo(common::FIXED16CHAR, common::VAR16CHAR))
-		(FromTo(common::FIXED16CHAR, common::VARBINARY))
-		(FromTo(common::FIXED16CHAR, common::INTERVALYEAR))
-		(FromTo(common::FIXED16CHAR, common::INTERVALDAY))
-		(FromTo(common::FIXEDBINARY, common::INT))
-		(FromTo(common::FIXEDBINARY, common::BIGINT))
-		(FromTo(common::FIXEDBINARY, common::DECIMAL9))
-		(FromTo(common::FIXEDBINARY, common::DECIMAL18))
-		(FromTo(common::FIXEDBINARY, common::DECIMAL28SPARSE))
-		(FromTo(common::FIXEDBINARY, common::DECIMAL38SPARSE))
-		(FromTo(common::FIXEDBINARY, common::DATE))
-		(FromTo(common::FIXEDBINARY, common::TIME))
-		(FromTo(common::FIXEDBINARY, common::TIMESTAMP))
-		(FromTo(common::FIXEDBINARY, common::INTERVAL))
-		(FromTo(common::FIXEDBINARY, common::FLOAT4))
-		(FromTo(common::FIXEDBINARY, common::FLOAT8))
-		(FromTo(common::FIXEDBINARY, common::BIT))
-		(FromTo(common::FIXEDBINARY, common::VARCHAR))
-		(FromTo(common::FIXEDBINARY, common::VAR16CHAR))
-		(FromTo(common::FIXEDBINARY, common::VARBINARY))
-		(FromTo(common::FIXEDBINARY, common::INTERVALYEAR))
-		(FromTo(common::FIXEDBINARY, common::INTERVALDAY))
-		(FromTo(common::VARCHAR, common::TINYINT))
-		(FromTo(common::VARCHAR, common::INT))
-		(FromTo(common::VARCHAR, common::BIGINT))
-		(FromTo(common::VARCHAR, common::DECIMAL9))
-		(FromTo(common::VARCHAR, common::DECIMAL18))
-		(FromTo(common::VARCHAR, common::DECIMAL28SPARSE))
-		(FromTo(common::VARCHAR, common::DECIMAL38SPARSE))
-		(FromTo(common::VARCHAR, common::DATE))
-		(FromTo(common::VARCHAR, common::TIME))
-		(FromTo(common::VARCHAR, common::TIMESTAMP))
-		(FromTo(common::VARCHAR, common::INTERVAL))
-		(FromTo(common::VARCHAR, common::FLOAT4))
-		(FromTo(common::VARCHAR, common::FLOAT8))
-		(FromTo(common::VARCHAR, common::BIT))
-		(FromTo(common::VARCHAR, common::VARCHAR))
-		(FromTo(common::VARCHAR, common::VAR16CHAR))
-		(FromTo(common::VARCHAR, common::VARBINARY))
-		(FromTo(common::VARCHAR, common::INTERVALYEAR))
-		(FromTo(common::VARCHAR, common::INTERVALDAY))
-		(FromTo(common::VAR16CHAR, common::TINYINT))
-		(FromTo(common::VAR16CHAR, common::INT))
-		(FromTo(common::VAR16CHAR, common::BIGINT))
-		(FromTo(common::VAR16CHAR, common::DECIMAL9))
-		(FromTo(common::VAR16CHAR, common::DECIMAL18))
-		(FromTo(common::VAR16CHAR, common::DECIMAL28SPARSE))
-		(FromTo(common::VAR16CHAR, common::DECIMAL38SPARSE))
-		(FromTo(common::VAR16CHAR, common::DATE))
-		(FromTo(common::VAR16CHAR, common::TIME))
-		(FromTo(common::VAR16CHAR, common::TIMESTAMP))
-		(FromTo(common::VAR16CHAR, common::INTERVAL))
-		(FromTo(common::VAR16CHAR, common::FLOAT4))
-		(FromTo(common::VAR16CHAR, common::FLOAT8))
-		(FromTo(common::VAR16CHAR, common::BIT))
-		(FromTo(common::VAR16CHAR, common::VARCHAR))
-		(FromTo(common::VAR16CHAR, common::VARBINARY))
-		(FromTo(common::VAR16CHAR, common::INTERVALYEAR))
-		(FromTo(common::VAR16CHAR, common::INTERVALDAY))
-		(FromTo(common::VARBINARY, common::TINYINT))
-		(FromTo(common::VARBINARY, common::INT))
-		(FromTo(common::VARBINARY, common::BIGINT))
-		(FromTo(common::VARBINARY, common::DECIMAL9))
-		(FromTo(common::VARBINARY, common::DECIMAL18))
-		(FromTo(common::VARBINARY, common::DECIMAL28SPARSE))
-		(FromTo(common::VARBINARY, common::DECIMAL38SPARSE))
-		(FromTo(common::VARBINARY, common::DATE))
-		(FromTo(common::VARBINARY, common::TIME))
-		(FromTo(common::VARBINARY, common::TIMESTAMP))
-		(FromTo(common::VARBINARY, common::INTERVAL))
-		(FromTo(common::VARBINARY, common::FLOAT4))
-		(FromTo(common::VARBINARY, common::FLOAT8))
-		(FromTo(common::VARBINARY, common::BIT))
-		(FromTo(common::VARBINARY, common::VARCHAR))
-		(FromTo(common::VARBINARY, common::VAR16CHAR))
-		(FromTo(common::VARBINARY, common::VARBINARY))
-		(FromTo(common::VARBINARY, common::INTERVALYEAR))
-		(FromTo(common::VARBINARY, common::INTERVALDAY))
-		(FromTo(common::UINT1, common::INT))
-		(FromTo(common::UINT1, common::BIGINT))
-		(FromTo(common::UINT1, common::DECIMAL9))
-		(FromTo(common::UINT1, common::DECIMAL18))
-		(FromTo(common::UINT1, common::DECIMAL28SPARSE))
-		(FromTo(common::UINT1, common::DECIMAL38SPARSE))
-		(FromTo(common::UINT1, common::DATE))
-		(FromTo(common::UINT1, common::TIME))
-		(FromTo(common::UINT1, common::TIMESTAMP))
-		(FromTo(common::UINT1, common::INTERVAL))
-		(FromTo(common::UINT1, common::FLOAT4))
-		(FromTo(common::UINT1, common::FLOAT8))
-		(FromTo(common::UINT1, common::BIT))
-		(FromTo(common::UINT1, common::VARCHAR))
-		(FromTo(common::UINT1, common::VAR16CHAR))
-		(FromTo(common::UINT1, common::VARBINARY))
-		(FromTo(common::UINT1, common::INTERVALYEAR))
-		(FromTo(common::UINT1, common::INTERVALDAY))
-		(FromTo(common::UINT2, common::INT))
-		(FromTo(common::UINT2, common::BIGINT))
-		(FromTo(common::UINT2, common::DECIMAL9))
-		(FromTo(common::UINT2, common::DECIMAL18))
-		(FromTo(common::UINT2, common::DECIMAL28SPARSE))
-		(FromTo(common::UINT2, common::DECIMAL38SPARSE))
-		(FromTo(common::UINT2, common::DATE))
-		(FromTo(common::UINT2, common::TIME))
-		(FromTo(common::UINT2, common::TIMESTAMP))
-		(FromTo(common::UINT2, common::INTERVAL))
-		(FromTo(common::UINT2, common::FLOAT4))
-		(FromTo(common::UINT2, common::FLOAT8))
-		(FromTo(common::UINT2, common::BIT))
-		(FromTo(common::UINT2, common::VARCHAR))
-		(FromTo(common::UINT2, common::VAR16CHAR))
-		(FromTo(common::UINT2, common::VARBINARY))
-		(FromTo(common::UINT2, common::INTERVALYEAR))
-		(FromTo(common::UINT2, common::INTERVALDAY))
-		(FromTo(common::UINT4, common::INT))
-		(FromTo(common::UINT4, common::BIGINT))
-		(FromTo(common::UINT4, common::DECIMAL9))
-		(FromTo(common::UINT4, common::DECIMAL18))
-		(FromTo(common::UINT4, common::DECIMAL28SPARSE))
-		(FromTo(common::UINT4, common::DECIMAL38SPARSE))
-		(FromTo(common::UINT4, common::DATE))
-		(FromTo(common::UINT4, common::TIME))
-		(FromTo(common::UINT4, common::TIMESTAMP))
-		(FromTo(common::UINT4, common::INTERVAL))
-		(FromTo(common::UINT4, common::FLOAT4))
-		(FromTo(common::UINT4, common::FLOAT8))
-		(FromTo(common::UINT4, common::BIT))
-		(FromTo(common::UINT4, common::VARCHAR))
-		(FromTo(common::UINT4, common::VAR16CHAR))
-		(FromTo(common::UINT4, common::VARBINARY))
-		(FromTo(common::UINT4, common::INTERVALYEAR))
-		(FromTo(common::UINT4, common::INTERVALDAY))
-		(FromTo(common::UINT8, common::INT))
-		(FromTo(common::UINT8, common::BIGINT))
-		(FromTo(common::UINT8, common::DECIMAL9))
-		(FromTo(common::UINT8, common::DECIMAL18))
-		(FromTo(common::UINT8, common::DECIMAL28SPARSE))
-		(FromTo(common::UINT8, common::DECIMAL38SPARSE))
-		(FromTo(common::UINT8, common::DATE))
-		(FromTo(common::UINT8, common::TIME))
-		(FromTo(common::UINT8, common::TIMESTAMP))
-		(FromTo(common::UINT8, common::INTERVAL))
-		(FromTo(common::UINT8, common::FLOAT4))
-		(FromTo(common::UINT8, common::FLOAT8))
-		(FromTo(common::UINT8, common::BIT))
-		(FromTo(common::UINT8, common::VARCHAR))
-		(FromTo(common::UINT8, common::VAR16CHAR))
-		(FromTo(common::UINT8, common::VARBINARY))
-		(FromTo(common::UINT8, common::INTERVALYEAR))
-		(FromTo(common::UINT8, common::INTERVALDAY))
-		(FromTo(common::DECIMAL28DENSE, common::INT))
-		(FromTo(common::DECIMAL28DENSE, common::BIGINT))
-		(FromTo(common::DECIMAL28DENSE, common::DECIMAL9))
-		(FromTo(common::DECIMAL28DENSE, common::DECIMAL18))
-		(FromTo(common::DECIMAL28DENSE, common::DECIMAL28SPARSE))
-		(FromTo(common::DECIMAL28DENSE, common::DECIMAL38SPARSE))
-		(FromTo(common::DECIMAL28DENSE, common::DATE))
-		(FromTo(common::DECIMAL28DENSE, common::TIME))
-		(FromTo(common::DECIMAL28DENSE, common::TIMESTAMP))
-		(FromTo(common::DECIMAL28DENSE, common::INTERVAL))
-		(FromTo(common::DECIMAL28DENSE, common::FLOAT4))
-		(FromTo(common::DECIMAL28DENSE, common::FLOAT8))
-		(FromTo(common::DECIMAL28DENSE, common::BIT))
-		(FromTo(common::DECIMAL28DENSE, common::VARCHAR))
-		(FromTo(common::DECIMAL28DENSE, common::VAR16CHAR))
-		(FromTo(common::DECIMAL28DENSE, common::VARBINARY))
-		(FromTo(common::DECIMAL28DENSE, common::INTERVALYEAR))
-		(FromTo(common::DECIMAL28DENSE, common::INTERVALDAY))
-		(FromTo(common::DECIMAL38DENSE, common::INT))
-		(FromTo(common::DECIMAL38DENSE, common::BIGINT))
-		(FromTo(common::DECIMAL38DENSE, common::DECIMAL9))
-		(FromTo(common::DECIMAL38DENSE, common::DECIMAL18))
-		(FromTo(common::DECIMAL38DENSE, common::DECIMAL28SPARSE))
-		(FromTo(common::DECIMAL38DENSE, common::DECIMAL38SPARSE))
-		(FromTo(common::DECIMAL38DENSE, common::DATE))
-		(FromTo(common::DECIMAL38DENSE, common::TIME))
-		(FromTo(common::DECIMAL38DENSE, common::TIMESTAMP))
-		(FromTo(common::DECIMAL38DENSE, common::INTERVAL))
-		(FromTo(common::DECIMAL38DENSE, common::FLOAT4))
-		(FromTo(common::DECIMAL38DENSE, common::FLOAT8))
-		(FromTo(common::DECIMAL38DENSE, common::BIT))
-		(FromTo(common::DECIMAL38DENSE, common::VARCHAR))
-		(FromTo(common::DECIMAL38DENSE, common::VAR16CHAR))
-		(FromTo(common::DECIMAL38DENSE, common::VARBINARY))
-		(FromTo(common::DECIMAL38DENSE, common::INTERVALYEAR))
-		(FromTo(common::DECIMAL38DENSE, common::INTERVALDAY))
-		(FromTo(common::DM_UNKNOWN, common::TINYINT))
-		(FromTo(common::DM_UNKNOWN, common::INT))
-		(FromTo(common::DM_UNKNOWN, common::BIGINT))
-		(FromTo(common::DM_UNKNOWN, common::DECIMAL9))
-		(FromTo(common::DM_UNKNOWN, common::DECIMAL18))
-		(FromTo(common::DM_UNKNOWN, common::DECIMAL28SPARSE))
-		(FromTo(common::DM_UNKNOWN, common::DECIMAL38SPARSE))
-		(FromTo(common::DM_UNKNOWN, common::DATE))
-		(FromTo(common::DM_UNKNOWN, common::TIME))
-		(FromTo(common::DM_UNKNOWN, common::TIMESTAMP))
-		(FromTo(common::DM_UNKNOWN, common::INTERVAL))
-		(FromTo(common::DM_UNKNOWN, common::FLOAT4))
-		(FromTo(common::DM_UNKNOWN, common::FLOAT8))
-		(FromTo(common::DM_UNKNOWN, common::BIT))
-		(FromTo(common::DM_UNKNOWN, common::VARCHAR))
-		(FromTo(common::DM_UNKNOWN, common::VAR16CHAR))
-		(FromTo(common::DM_UNKNOWN, common::VARBINARY))
-		(FromTo(common::DM_UNKNOWN, common::INTERVALYEAR))
-		(FromTo(common::DM_UNKNOWN, common::INTERVALDAY))
-		(FromTo(common::INTERVALYEAR, common::INT))
-		(FromTo(common::INTERVALYEAR, common::BIGINT))
-		(FromTo(common::INTERVALYEAR, common::DECIMAL9))
-		(FromTo(common::INTERVALYEAR, common::DECIMAL18))
-		(FromTo(common::INTERVALYEAR, common::DECIMAL28SPARSE))
-		(FromTo(common::INTERVALYEAR, common::DECIMAL38SPARSE))
-		(FromTo(common::INTERVALYEAR, common::DATE))
-		(FromTo(common::INTERVALYEAR, common::TIME))
-		(FromTo(common::INTERVALYEAR, common::TIMESTAMP))
-		(FromTo(common::INTERVALYEAR, common::INTERVAL))
-		(FromTo(common::INTERVALYEAR, common::FLOAT4))
-		(FromTo(common::INTERVALYEAR, common::FLOAT8))
-		(FromTo(common::INTERVALYEAR, common::BIT))
-		(FromTo(common::INTERVALYEAR, common::VARCHAR))
-		(FromTo(common::INTERVALYEAR, common::VAR16CHAR))
-		(FromTo(common::INTERVALYEAR, common::VARBINARY))
-		(FromTo(common::INTERVALYEAR, common::INTERVALYEAR))
-		(FromTo(common::INTERVALYEAR, common::INTERVALDAY))
-		(FromTo(common::INTERVALDAY, common::INT))
-		(FromTo(common::INTERVALDAY, common::BIGINT))
-		(FromTo(common::INTERVALDAY, common::DECIMAL9))
-		(FromTo(common::INTERVALDAY, common::DECIMAL18))
-		(FromTo(common::INTERVALDAY, common::DECIMAL28SPARSE))
-		(FromTo(common::INTERVALDAY, common::DECIMAL38SPARSE))
-		(FromTo(common::INTERVALDAY, common::DATE))
-		(FromTo(common::INTERVALDAY, common::TIME))
-		(FromTo(common::INTERVALDAY, common::TIMESTAMP))
-		(FromTo(common::INTERVALDAY, common::INTERVAL))
-		(FromTo(common::INTERVALDAY, common::FLOAT4))
-		(FromTo(common::INTERVALDAY, common::FLOAT8))
-		(FromTo(common::INTERVALDAY, common::BIT))
-		(FromTo(common::INTERVALDAY, common::VARCHAR))
-		(FromTo(common::INTERVALDAY, common::VAR16CHAR))
-		(FromTo(common::INTERVALDAY, common::VARBINARY))
-		(FromTo(common::INTERVALDAY, common::INTERVALYEAR))
-		(FromTo(common::INTERVALDAY, common::INTERVALDAY));
+static Drill::meta::UnionSupport unionSupport(const google::protobuf::RepeatedField<google::protobuf::int32>& unionSupportList) {
+	Drill::meta::UnionSupport result(Drill::meta::U_NONE);
+
+	for(google::protobuf::RepeatedField<google::protobuf::int32>::const_iterator it = unionSupportList.begin();
+			it != unionSupportList.end();
+			++it) {
+			switch(static_cast<exec::user::UnionSupport>(*it)) {
+			case exec::user::U_UNION:
+				result |= Drill::meta::U_UNION;
+				break;
+
+			case exec::user::U_UNION_ALL:
+				result |= Drill::meta::U_UNION_ALL;
+				break;
+
+			// ignore unknown
+			case exec::user::U_UNKNOWN:
+			default:
+				break;
+			}
+		}
+
+	return result;
+}
+
+static bool unrelatedColumnsInOrderBySupported(const google::protobuf::RepeatedField<google::protobuf::int32>& orderBySupportList) {
+	for(google::protobuf::RepeatedField<google::protobuf::int32>::const_iterator it = orderBySupportList.begin();
+			it != orderBySupportList.end();
+			++it) {
+			switch(static_cast<exec::user::OrderBySupport>(*it)) {
+			case exec::user::OB_UNRELATED:
+				return true;
+				break;
+
+			case exec::user::OB_EXPRESSION:
+			// ignore unknown
+			case exec::user::OB_UNKNOWN:
+			default:
+				break;
+			}
+		}
+
+	return false;
+}
 } // anonymous namespace
 
+const exec::user::ServerMeta DrillMetadata::s_defaultServerMeta = createDefaultServerMeta();
+
+DrillMetadata::DrillMetadata(DrillClientImpl& client, const exec::user::ServerMeta&  serverMeta): Metadata(), m_client(client),
+		m_allTablesSelectable(serverMeta.all_tables_selectable()),
+		m_blobIncludedInMaxRowSize(serverMeta.blob_included_in_max_row_size()),
+		m_catalogAtStart(serverMeta.catalog_at_start()),
+		m_catalogSeparator(serverMeta.catalog_separator()),
+		m_catalogTerm(serverMeta.catalog_term()),
+		m_collateSupport(collateSupport(serverMeta.collate_support())),
+		m_columnAliasingSupported(serverMeta.column_aliasing_supported()),
+		m_correlationNamesSupport(correlationNames(serverMeta.correlation_names_support())),
+		m_convertSupport(serverMeta.convert_support().begin(), serverMeta.convert_support().end()),
+		m_dateTimeFunctions(serverMeta.date_time_functions().begin(), serverMeta.date_time_functions().end()),
+		m_dateTimeLiteralsSupport(dateTimeLiteralsSupport(serverMeta.date_time_literals_support())),
+		m_groupBySupport(groupBySupport(serverMeta.group_by_support())),
+		m_identifierCase(identifierCase(serverMeta.identifier_casing())),
+		m_identifierQuoteString(serverMeta.identifier_quote_string()),
+		m_likeEscapeClauseSupported(serverMeta.like_escape_clause_supported()),
+		m_maxBinaryLiteralLength(serverMeta.max_binary_literal_length()),
+		m_maxCatalogNameLength(serverMeta.max_catalog_name_length()),
+		m_maxCharLIteralLength(serverMeta.max_char_literal_length()),
+		m_maxColumnNameLength(serverMeta.max_column_name_length()),
+		m_maxColumnsInGroupBy(serverMeta.max_columns_in_group_by()),
+		m_maxColumnsInOrderBy(serverMeta.max_columns_in_order_by()),
+		m_maxColumnsInSelect(serverMeta.max_columns_in_select()),
+		m_maxCursorNameLength(serverMeta.max_cursor_name_length()),
+		m_maxLogicalLobSize(serverMeta.max_logical_lob_size()),
+		m_maxRowSize(serverMeta.max_row_size()),
+		m_maxSchemaNameLength(serverMeta.max_schema_name_length()),
+		m_maxStatementLength(serverMeta.max_statement_length()),
+		m_maxStatements(serverMeta.max_statements()),
+		m_maxTableNameLength(serverMeta.max_table_name_length()),
+		m_maxTablesInSelectLength(serverMeta.max_tables_in_select()),
+		m_maxUserNameLength(serverMeta.max_user_name_length()),
+		m_nullCollation(nullCollation(serverMeta.null_collation())),
+		m_nullPlusNonNullEqualsNull(serverMeta.null_plus_non_null_equals_null()),
+		m_numericFunctions(serverMeta.numeric_functions().begin(), serverMeta.numeric_functions().end()),
+		m_outerJoinSupport(outerJoinSupport(serverMeta.outer_join_support())),
+		m_quotedIdentifierCase(quotedIdentifierCase(serverMeta.quoted_identifier_casing())),
+		m_readOnly(serverMeta.read_only()),
+		m_schemaTerm(serverMeta.schema_term()),
+		m_searchEscapeString(serverMeta.search_escape_string()),
+		m_selectForUpdateSupported(serverMeta.select_for_update_supported()),
+		m_specialCharacters(serverMeta.special_characters()),
+		m_sqlKeywords(serverMeta.sql_keywords().begin(), serverMeta.sql_keywords().end()),
+		m_stringFunctions(serverMeta.string_functions().begin(), serverMeta.string_functions().end()),
+		m_subQuerySupport(subQuerySupport(serverMeta.subquery_support())),
+		m_systemFunctions(serverMeta.system_functions().begin(), serverMeta.system_functions().end()),
+		m_tableTerm(serverMeta.table_term()),
+		m_transactionSupported(serverMeta.transaction_supported()),
+		m_unionSupport(unionSupport(serverMeta.union_support())),
+		m_unrelatedColumnsInOrderBySupported(unrelatedColumnsInOrderBySupported(serverMeta.order_by_support()))
+{
+}
+
 // Conversion scalar function support
 bool DrillMetadata::isConvertSupported(common::MinorType from, common::MinorType to) const {
-	return s_convertMap.find(FromTo(from,to)) != s_convertMap.end();
+	return m_convertSupport.find(ConvertSupport(from,to)) != m_convertSupport.end();
 }
 
 const std::string& DrillMetadata::getServerName() const {
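
For illustration of the pattern: each helper above collapses a repeated
protobuf enum field into a client-side summary (a bitmask for UNION support,
a single boolean for ORDER BY support), ignoring unknown values so that newer
servers do not break older clients. A minimal standalone sketch of that
reduction, using toy enums instead of the generated exec::user types (every
name below is illustrative, not the real API), might look like:

#include <iostream>
#include <vector>

// Toy stand-ins for the server-side enum and the client-side bitmask.
enum ServerUnionSupport { SRV_U_UNKNOWN = 0, SRV_U_UNION = 1, SRV_U_UNION_ALL = 2 };
enum ClientUnionSupport { CLI_U_NONE = 0, CLI_U_UNION = 1 << 0, CLI_U_UNION_ALL = 1 << 1 };

static int unionSupport(const std::vector<int>& serverList) {
    int result = CLI_U_NONE;
    for (std::vector<int>::const_iterator it = serverList.begin();
         it != serverList.end(); ++it) {
        switch (static_cast<ServerUnionSupport>(*it)) {
        case SRV_U_UNION:     result |= CLI_U_UNION;     break;
        case SRV_U_UNION_ALL: result |= CLI_U_UNION_ALL; break;
        default:              break;  // unknown values are ignored
        }
    }
    return result;
}

int main() {
    std::vector<int> fromServer;            // pretend payload from the server
    fromServer.push_back(SRV_U_UNION);
    fromServer.push_back(SRV_U_UNION_ALL);

    const int support = unionSupport(fromServer);
    std::cout << ((support & CLI_U_UNION) != 0) << " "
              << ((support & CLI_U_UNION_ALL) != 0) << std::endl;  // 1 1
    return 0;
}

The convert-support list follows the same shape, except that the summary is a
set keyed on (from, to) pairs which isConvertSupported() simply probes.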


[02/27] drill git commit: DRILL-4994: Refactor DrillCursor

Posted by jn...@apache.org.
DRILL-4994: Refactor DrillCursor

Refactor DrillCursor to be more self-contained.


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/ab60855b
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/ab60855b
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/ab60855b

Branch: refs/heads/master
Commit: ab60855bf390e8f01369760f019ee06eecf1959e
Parents: e2b5271
Author: Laurent Goujon <la...@dremio.com>
Authored: Fri Nov 4 13:31:19 2016 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:09 2017 -0800

----------------------------------------------------------------------
 .../jdbc/impl/AvaticaDrillSqlAccessor.java      |   4 +-
 .../org/apache/drill/jdbc/impl/DrillCursor.java | 323 +++++++++++++++++--
 .../drill/jdbc/impl/DrillResultSetImpl.java     | 302 +----------------
 3 files changed, 317 insertions(+), 312 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/ab60855b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/AvaticaDrillSqlAccessor.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/AvaticaDrillSqlAccessor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/AvaticaDrillSqlAccessor.java
index 5a48e59..914e279 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/AvaticaDrillSqlAccessor.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/AvaticaDrillSqlAccessor.java
@@ -64,11 +64,11 @@ class AvaticaDrillSqlAccessor implements Accessor {
     // so in that case row can be left at -1, so isBeforeFirst() returns true
     // even though we're no longer before the empty set of rows--and it's all
     // private, so we can't get to it to override any of several candidates.
-    if ( cursor.getResultSet().isAfterLast() ) {
+    if ( cursor.isAfterLast() ) {
       throw new InvalidCursorStateSqlException(
           "Result set cursor is already positioned past all rows." );
     }
-    else if ( cursor.getResultSet().isBeforeFirst() ) {
+    else if ( cursor.isBeforeFirst() ) {
       throw new InvalidCursorStateSqlException(
           "Result set cursor is positioned before all rows.  Call next() first." );
     }
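
With this change the accessor interrogates the cursor directly for its
position instead of reaching through the ResultSet. The guard itself is just
a small state check; a rough standalone C++ sketch of the same
before-first/after-last bookkeeping (hypothetical class names, not the actual
JDBC types) could be:

#include <iostream>
#include <stdexcept>

// Hypothetical cursor: the row number is -1 before the first row, and a
// flag records when iteration has moved past the last row.
class ToyCursor {
public:
    ToyCursor() : m_rowNumber(-1), m_afterLast(false) {}

    bool isBeforeFirst() const { return m_rowNumber < 0; }
    bool isAfterLast() const { return m_afterLast; }

    void next(bool hasMore) {                 // toy driver for the sketch
        if (hasMore) { ++m_rowNumber; }
        else         { m_afterLast = true; }
    }

private:
    int  m_rowNumber;
    bool m_afterLast;
};

// Same shape as the accessor guard: reject reads outside the row range.
static void checkReadable(const ToyCursor& cursor) {
    if (cursor.isAfterLast())   throw std::logic_error("already past all rows");
    if (cursor.isBeforeFirst()) throw std::logic_error("call next() first");
}

int main() {
    ToyCursor c;
    try { checkReadable(c); }
    catch (const std::logic_error& e) {
        std::cout << e.what() << std::endl;   // prints: call next() first
    }
    c.next(true);                             // now on the first row
    checkReadable(c);                         // passes
    std::cout << "row readable" << std::endl;
    return 0;
}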

http://git-wip-us.apache.org/repos/asf/drill/blob/ab60855b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
index 08570a8..ed279a3 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
@@ -24,33 +24,260 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.calcite.avatica.AvaticaResultSet;
+import org.apache.calcite.avatica.AvaticaStatement;
 import org.apache.calcite.avatica.ColumnMetaData;
+import org.apache.calcite.avatica.Meta;
+import org.apache.calcite.avatica.Meta.Signature;
 import org.apache.calcite.avatica.util.ArrayImpl.Factory;
 import org.apache.calcite.avatica.util.Cursor;
 import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.client.DrillClient;
 import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.proto.UserBitShared.QueryId;
+import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
+import org.apache.drill.exec.proto.UserBitShared.QueryType;
+import org.apache.drill.exec.proto.helper.QueryIdHelper;
 import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.RecordBatchLoader;
+import org.apache.drill.exec.rpc.ConnectionThrottle;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
+import org.apache.drill.exec.rpc.user.UserResultsListener;
 import org.apache.drill.exec.store.ischema.InfoSchemaConstants;
+import org.apache.drill.jdbc.SchemaChangeListener;
 import org.slf4j.Logger;
 
+import com.google.common.collect.Queues;
+
 
 class DrillCursor implements Cursor {
+
+  ////////////////////////////////////////
+  // ResultsListener:
+  static class ResultsListener implements UserResultsListener {
+    private static final org.slf4j.Logger logger =
+        org.slf4j.LoggerFactory.getLogger(ResultsListener.class);
+
+    private static volatile int nextInstanceId = 1;
+
+    /** (Just for logging.) */
+    private final int instanceId;
+
+    private final int batchQueueThrottlingThreshold;
+
+    /** (Just for logging.) */
+    private volatile QueryId queryId;
+
+    /** (Just for logging.) */
+    private int lastReceivedBatchNumber;
+    /** (Just for logging.) */
+    private int lastDequeuedBatchNumber;
+
+    private volatile UserException executionFailureException;
+
+    // TODO:  Revisit "completed".  Determine and document exactly what it
+    // means.  Some uses imply that it means that incoming messages indicate
+    // that the _query_ has _terminated_ (not necessarily _completing_
+    // normally), while some uses imply that it's some other state of the
+    // ResultListener.  Some uses seem redundant.
+    volatile boolean completed = false;
+
+    /** Whether throttling of incoming data is active. */
+    private final AtomicBoolean throttled = new AtomicBoolean( false );
+    private volatile ConnectionThrottle throttle;
+
+    private volatile boolean closed = false;
+
+    private final CountDownLatch firstMessageReceived = new CountDownLatch(1);
+
+    final LinkedBlockingDeque<QueryDataBatch> batchQueue =
+        Queues.newLinkedBlockingDeque();
+
+
+    /**
+     * ...
+     * @param  batchQueueThrottlingThreshold
+     *         queue size threshold for throttling server
+     */
+    ResultsListener( int batchQueueThrottlingThreshold ) {
+      instanceId = nextInstanceId++;
+      this.batchQueueThrottlingThreshold = batchQueueThrottlingThreshold;
+      logger.debug( "[#{}] Query listener created.", instanceId );
+    }
+
+    /**
+     * Starts throttling if not currently throttling.
+     * @param  throttle  the "throttlable" object to throttle
+     * @return  true if actually started (wasn't throttling already)
+     */
+    private boolean startThrottlingIfNot( ConnectionThrottle throttle ) {
+      final boolean started = throttled.compareAndSet( false, true );
+      if ( started ) {
+        this.throttle = throttle;
+        throttle.setAutoRead(false);
+      }
+      return started;
+    }
+
+    /**
+     * Stops throttling if currently throttling.
+     * @return  true if actually stopped (was throttling)
+     */
+    private boolean stopThrottlingIfSo() {
+      final boolean stopped = throttled.compareAndSet( true, false );
+      if ( stopped ) {
+        throttle.setAutoRead(true);
+        throttle = null;
+      }
+      return stopped;
+    }
+
+    public void awaitFirstMessage() throws InterruptedException {
+      firstMessageReceived.await();
+    }
+
+    private void releaseIfFirst() {
+      firstMessageReceived.countDown();
+    }
+
+    @Override
+    public void queryIdArrived(QueryId queryId) {
+      logger.debug( "[#{}] Received query ID: {}.",
+                    instanceId, QueryIdHelper.getQueryId( queryId ) );
+      this.queryId = queryId;
+    }
+
+    @Override
+    public void submissionFailed(UserException ex) {
+      logger.debug( "[#{}] Received query failure:", instanceId, ex );
+      this.executionFailureException = ex;
+      completed = true;
+      close();
+      logger.info( "[#{}] Query failed: ", instanceId, ex );
+    }
+
+    @Override
+    public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
+      lastReceivedBatchNumber++;
+      logger.debug( "[#{}] Received query data batch #{}: {}.",
+                    instanceId, lastReceivedBatchNumber, result );
+
+      // If we're in a closed state, just release the message.
+      if (closed) {
+        result.release();
+        // TODO:  Revisit member completed:  Is ResultListener really completed
+        // after only one data batch after being closed?
+        completed = true;
+        return;
+      }
+
+      // We're active; let's add to the queue.
+      batchQueue.add(result);
+
+      // Throttle server if queue size has exceeded the threshold.
+      if (batchQueue.size() > batchQueueThrottlingThreshold ) {
+        if ( startThrottlingIfNot( throttle ) ) {
+          logger.debug( "[#{}] Throttling started at queue size {}.",
+                        instanceId, batchQueue.size() );
+        }
+      }
+
+      releaseIfFirst();
+    }
+
+    @Override
+    public void queryCompleted(QueryState state) {
+      logger.debug( "[#{}] Received query completion: {}.", instanceId, state );
+      releaseIfFirst();
+      completed = true;
+    }
+
+    QueryId getQueryId() {
+      return queryId;
+    }
+
+
+    /**
+     * Gets the next batch of query results from the queue.
+     * @return  the next batch, or {@code null} after last batch has been returned
+     * @throws UserException
+     *         if the query failed
+     * @throws InterruptedException
+     *         if waiting on the queue was interrupted
+     */
+    QueryDataBatch getNext() throws UserException, InterruptedException {
+      while (true) {
+        if (executionFailureException != null) {
+          logger.debug( "[#{}] Dequeued query failure exception: {}.",
+                        instanceId, executionFailureException );
+          throw executionFailureException;
+        }
+        if (completed && batchQueue.isEmpty()) {
+          return null;
+        } else {
+          QueryDataBatch qdb = batchQueue.poll(50, TimeUnit.MILLISECONDS);
+          if (qdb != null) {
+            lastDequeuedBatchNumber++;
+            logger.debug( "[#{}] Dequeued query data batch #{}: {}.",
+                          instanceId, lastDequeuedBatchNumber, qdb );
+
+            // Unthrottle server if queue size has dropped enough below threshold:
+            if ( batchQueue.size() < batchQueueThrottlingThreshold / 2
+                 || batchQueue.size() == 0  // (in case threshold < 2)
+                 ) {
+              if ( stopThrottlingIfSo() ) {
+                logger.debug( "[#{}] Throttling stopped at queue size {}.",
+                              instanceId, batchQueue.size() );
+              }
+            }
+            return qdb;
+          }
+        }
+      }
+    }
+
+    void close() {
+      logger.debug( "[#{}] Query listener closing.", instanceId );
+      closed = true;
+      if ( stopThrottlingIfSo() ) {
+        logger.debug( "[#{}] Throttling stopped at close() (at queue size {}).",
+                      instanceId, batchQueue.size() );
+      }
+      while (!batchQueue.isEmpty()) {
+        QueryDataBatch qdb = batchQueue.poll();
+        if (qdb != null && qdb.getData() != null) {
+          qdb.getData().release();
+        }
+      }
+      // Close may be called before the first result is received and therefore
+      // when the main thread is blocked waiting for the result.  In that case
+      // we want to unblock the main thread.
+      firstMessageReceived.countDown(); // TODO:  Why not call releaseIfFirst as used elsewhere?
+      completed = true;
+    }
+
+  }
+
   private static final Logger logger = getLogger( DrillCursor.class );
 
   /** JDBC-specified string for unknown catalog, schema, and table names. */
   private static final String UNKNOWN_NAME_STRING = "";
 
-  /** The associated {@link java.sql.ResultSet} implementation. */
-  private final DrillResultSetImpl resultSet;
+  private final DrillConnectionImpl connection;
+  private final AvaticaStatement statement;
+  private final Meta.Signature signature;
 
   /** Holds current batch of records (none before first load). */
   private final RecordBatchLoader currentBatchHolder;
 
-  private final DrillResultSetImpl.ResultsListener resultsListener;
+  private final ResultsListener resultsListener;
+  private SchemaChangeListener changeListener;
 
   private final DrillAccessorList accessors = new DrillAccessorList();
 
@@ -85,6 +312,7 @@ class DrillCursor implements Cursor {
   /** Whether cursor is after the end of the sequence of records/rows. */
   private boolean afterLastRow = false;
 
+  private int currentRowNumber = -1;
   /** Zero-based offset of current record in record batch.
    * (Not <i>row</i> number.) */
   private int currentRecordNumber = -1;
@@ -92,22 +320,42 @@ class DrillCursor implements Cursor {
 
   /**
    *
-   * @param  resultSet  the associated ResultSet implementation
+   * @param statement
+   * @param signature
    */
-  DrillCursor(final DrillResultSetImpl resultSet) {
-    this.resultSet = resultSet;
-    currentBatchHolder = resultSet.batchLoader;
-    resultsListener = resultSet.resultsListener;
-  }
-
-  DrillResultSetImpl getResultSet() {
-    return resultSet;
+  DrillCursor(DrillConnectionImpl connection, AvaticaStatement statement, Signature signature) {
+    this.connection = connection;
+    this.statement = statement;
+    this.signature = signature;
+
+    DrillClient client = connection.getClient();
+    final int batchQueueThrottlingThreshold =
+        client.getConfig().getInt(
+            ExecConstants.JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD );
+    resultsListener = new ResultsListener(batchQueueThrottlingThreshold);
+    currentBatchHolder = new RecordBatchLoader(client.getAllocator());
   }
 
   protected int getCurrentRecordNumber() {
     return currentRecordNumber;
   }
 
+  public String getQueryId() {
+    if (resultsListener.getQueryId() != null) {
+      return QueryIdHelper.getQueryId(resultsListener.getQueryId());
+    } else {
+      return null;
+    }
+  }
+
+  public boolean isBeforeFirst() {
+    return currentRowNumber < 0;
+  }
+
+  public boolean isAfterLast() {
+    return afterLastRow;
+  }
+
   // (Overly restrictive Avatica uses List<Accessor> instead of List<? extends
   // Accessor>, so accessors/DrillAccessorList can't be of type
   // List<AvaticaDrillSqlAccessor>, and we have to cast from Accessor to
@@ -119,6 +367,14 @@ class DrillCursor implements Cursor {
     return accessors;
   }
 
+  synchronized void cleanup() {
+    if (resultsListener.getQueryId() != null && ! resultsListener.completed) {
+      connection.getClient().cancelQuery(resultsListener.getQueryId());
+    }
+    resultsListener.close();
+    currentBatchHolder.clear();
+  }
+
   /**
    * Updates column accessors and metadata from current record batch.
    */
@@ -144,8 +400,8 @@ class DrillCursor implements Cursor {
         schema,
         getObjectClasses );
 
-    if (getResultSet().changeListener != null) {
-      getResultSet().changeListener.schemaChanged(schema);
+    if (changeListener != null) {
+      changeListener.schemaChanged(schema);
     }
   }
 
@@ -261,6 +517,7 @@ class DrillCursor implements Cursor {
       throw new IllegalStateException(
           "loadInitialSchema() called a second time" );
     }
+
     assert ! afterLastRow : "afterLastRow already true in loadInitialSchema()";
     assert ! afterFirstBatch : "afterFirstBatch already true in loadInitialSchema()";
     assert -1 == currentRecordNumber
@@ -270,6 +527,26 @@ class DrillCursor implements Cursor {
         : "currentBatchHolder.getRecordCount() not 0 (is "
            + currentBatchHolder.getRecordCount() + ") in loadInitialSchema()";
 
+    if (statement instanceof DrillPreparedStatementImpl) {
+      DrillPreparedStatementImpl drillPreparedStatement = (DrillPreparedStatementImpl) statement;
+      connection.getClient().executePreparedStatement(drillPreparedStatement.getPreparedStatementHandle().getServerHandle(), resultsListener);
+    } else {
+      connection.getClient().runQuery(QueryType.SQL, signature.sql, resultsListener);
+    }
+
+    try {
+      resultsListener.awaitFirstMessage();
+    } catch ( InterruptedException e ) {
+      // Preserve evidence that the interruption occurred so that code higher up
+      // on the call stack can learn of the interruption and respond to it if it
+      // wants to.
+      Thread.currentThread().interrupt();
+
+      // Not normally expected--Drill doesn't interrupt in this area (right?)--
+      // but JDBC client certainly could.
+      throw new SQLException("Interrupted", e );
+    }
+
     returnTrueForNextCallToNext = true;
 
     nextRowInternally();
@@ -297,26 +574,28 @@ class DrillCursor implements Cursor {
       return false;
     }
     else if ( returnTrueForNextCallToNext ) {
+      ++currentRowNumber;
       // We have a deferred "not after end" to report--reset and report that.
       returnTrueForNextCallToNext = false;
       return true;
     }
     else {
       accessors.clearLastColumnIndexedInRow();
-      return nextRowInternally();
+      boolean res = nextRowInternally();
+      if (res) { ++ currentRowNumber; }
+
+      return res;
     }
   }
 
+  public void cancel() {
+    close();
+  }
+
   @Override
   public void close() {
-    // currentBatchHolder is owned by resultSet and cleaned up by
-    // DrillResultSet.cleanup()
-
-    // listener is owned by resultSet and cleaned up by
-    // DrillResultSet.cleanup()
-
     // Clean up result set (to deallocate any buffers).
-    getResultSet().cleanup();
+    cleanup();
     // TODO:  CHECK:  Something might need to set statement.openResultSet to
     // null.  Also, AvaticaResultSet.close() doesn't check whether already
     // closed and skip calls to cursor.close(), statement.onResultSetClose()
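
The ResultsListener above throttles the server once batchQueue.size() exceeds
batchQueueThrottlingThreshold and releases it in getNext() once the queue has
drained below half that threshold (or is empty), with the compare-and-set
guards making start/stop idempotent under concurrency. A compact,
single-threaded sketch of that hysteresis, with toy types standing in for
ConnectionThrottle and QueryDataBatch:

#include <deque>
#include <iostream>

// Toy stand-in for ConnectionThrottle: just records the auto-read flag.
struct ToyThrottle {
    bool autoRead;
    ToyThrottle() : autoRead(true) {}
    void setAutoRead(bool b) { autoRead = b; }
};

class ToyListener {
public:
    explicit ToyListener(int threshold) : m_threshold(threshold), m_throttled(false) {}

    void dataArrived(int batch, ToyThrottle& t) {
        m_queue.push_back(batch);
        if ((int)m_queue.size() > m_threshold && !m_throttled) {
            m_throttled = true;               // start throttling exactly once
            t.setAutoRead(false);
        }
    }

    bool getNext(int& batch, ToyThrottle& t) {
        if (m_queue.empty()) return false;
        batch = m_queue.front();
        m_queue.pop_front();
        // Unthrottle once the queue has drained well below the threshold.
        if (m_throttled &&
            ((int)m_queue.size() < m_threshold / 2 || m_queue.empty())) {
            m_throttled = false;
            t.setAutoRead(true);
        }
        return true;
    }

private:
    std::deque<int> m_queue;
    int m_threshold;
    bool m_throttled;
};

int main() {
    ToyThrottle t;
    ToyListener l(2);
    for (int i = 0; i < 4; ++i) l.dataArrived(i, t);
    std::cout << "autoRead after burst: " << t.autoRead << std::endl; // 0
    int b;
    while (l.getNext(b, t)) {}
    std::cout << "autoRead after drain: " << t.autoRead << std::endl; // 1
    return 0;
}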

http://git-wip-us.apache.org/repos/asf/drill/blob/ab60855b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
index a2a7699..e406348 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
@@ -40,10 +40,6 @@ import java.sql.Types;
 import java.util.Calendar;
 import java.util.Map;
 import java.util.TimeZone;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.calcite.avatica.AvaticaResultSet;
 import org.apache.calcite.avatica.AvaticaSite;
@@ -51,23 +47,9 @@ import org.apache.calcite.avatica.AvaticaStatement;
 import org.apache.calcite.avatica.ColumnMetaData;
 import org.apache.calcite.avatica.Meta;
 import org.apache.calcite.avatica.util.Cursor;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.proto.UserBitShared.QueryId;
-import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
-import org.apache.drill.exec.proto.UserBitShared.QueryType;
-import org.apache.drill.exec.proto.helper.QueryIdHelper;
-import org.apache.drill.exec.record.RecordBatchLoader;
-import org.apache.drill.exec.rpc.ConnectionThrottle;
-import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserResultsListener;
 import org.apache.drill.jdbc.AlreadyClosedSqlException;
 import org.apache.drill.jdbc.DrillResultSet;
 import org.apache.drill.jdbc.ExecutionCanceledSqlException;
-import org.apache.drill.jdbc.SchemaChangeListener;
-
-import com.google.common.collect.Queues;
 
 
 /**
@@ -79,29 +61,13 @@ class DrillResultSetImpl extends AvaticaResultSet implements DrillResultSet {
       org.slf4j.LoggerFactory.getLogger(DrillResultSetImpl.class);
 
   private final DrillConnectionImpl connection;
-
-  SchemaChangeListener changeListener;
-  final ResultsListener resultsListener;
-  private final DrillClient client;
-  // TODO:  Resolve:  Since is barely manipulated here in DrillResultSetImpl,
-  //  move down into DrillCursor and have this.clean() have cursor clean it.
-  final RecordBatchLoader batchLoader;
-  final DrillCursor cursor;
-  boolean hasPendingCancelationNotification;
-
+  private volatile boolean hasPendingCancelationNotification = false;
 
   DrillResultSetImpl(AvaticaStatement statement, Meta.Signature signature,
                      ResultSetMetaData resultSetMetaData, TimeZone timeZone,
                      Meta.Frame firstFrame) {
     super(statement, signature, resultSetMetaData, timeZone, firstFrame);
     connection = (DrillConnectionImpl) statement.getConnection();
-    client = connection.getClient();
-    final int batchQueueThrottlingThreshold =
-        client.getConfig().getInt(
-            ExecConstants.JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD );
-    resultsListener = new ResultsListener(batchQueueThrottlingThreshold);
-    batchLoader = new RecordBatchLoader(client.getAllocator());
-    cursor = new DrillCursor(this);
   }
 
   /**
@@ -118,7 +84,7 @@ class DrillResultSetImpl extends AvaticaResultSet implements DrillResultSet {
                                       ExecutionCanceledSqlException,
                                       SQLException {
     if ( isClosed() ) {
-      if ( hasPendingCancelationNotification ) {
+      if (cursor instanceof DrillCursor && hasPendingCancelationNotification) {
         hasPendingCancelationNotification = false;
         throw new ExecutionCanceledSqlException(
             "SQL statement execution canceled; ResultSet now closed." );
@@ -139,17 +105,12 @@ class DrillResultSetImpl extends AvaticaResultSet implements DrillResultSet {
 
   @Override
   protected void cancel() {
-    hasPendingCancelationNotification = true;
-    cleanup();
-    close();
-  }
-
-  synchronized void cleanup() {
-    if (resultsListener.getQueryId() != null && ! resultsListener.completed) {
-      client.cancelQuery(resultsListener.getQueryId());
+    if (cursor instanceof DrillCursor) {
+      hasPendingCancelationNotification = true;
+      ((DrillCursor) cursor).cancel();
+    } else {
+      super.cancel();
     }
-    resultsListener.close();
-    batchLoader.clear();
   }
 
   ////////////////////////////////////////
@@ -172,7 +133,7 @@ class DrillResultSetImpl extends AvaticaResultSet implements DrillResultSet {
     // cancellation) which in turn sets the cursor to null.  So we must check
     // before we call next.
     // TODO: handle next() after close is called in the Avatica code.
-    if (super.cursor != null) {
+    if (cursor != null) {
       return super.next();
     } else {
       return false;
@@ -1900,11 +1861,10 @@ class DrillResultSetImpl extends AvaticaResultSet implements DrillResultSet {
   @Override
   public String getQueryId() throws SQLException {
     throwIfClosed();
-    if (resultsListener.getQueryId() != null) {
-      return QueryIdHelper.getQueryId(resultsListener.getQueryId());
-    } else {
-      return null;
+    if (cursor instanceof DrillCursor) {
+      return ((DrillCursor) cursor).getQueryId();
     }
+    return null;
   }
 
 
@@ -1912,249 +1872,15 @@ class DrillResultSetImpl extends AvaticaResultSet implements DrillResultSet {
 
   @Override
   protected DrillResultSetImpl execute() throws SQLException{
-    if (statement instanceof DrillPreparedStatementImpl) {
-      DrillPreparedStatementImpl drillPreparedStatement = (DrillPreparedStatementImpl) statement;
-      client.executePreparedStatement(drillPreparedStatement.getPreparedStatementHandle().getServerHandle(), resultsListener);
-    } else {
-      client.runQuery(QueryType.SQL, this.signature.sql, resultsListener);
-    }
     connection.getDriver().handler.onStatementExecute(statement, null);
 
-    super.execute2(cursor, this.signature.columns);
-
-    // don't return with metadata until we've achieved at least one return message.
-    try {
-      // TODO:  Revisit:  Why reaching directly into ResultsListener rather than
-      // calling some wait method?
-      resultsListener.latch.await();
-    } catch ( InterruptedException e ) {
-      // Preserve evidence that the interruption occurred so that code higher up
-      // on the call stack can learn of the interruption and respond to it if it
-      // wants to.
-      Thread.currentThread().interrupt();
-
-      // Not normally expected--Drill doesn't interrupt in this area (right?)--
-      // but JDBC client certainly could.
-      throw new SQLException( "Interrupted", e );
-    }
+    DrillCursor drillCursor = new DrillCursor(connection, statement, signature);
+    super.execute2(drillCursor, this.signature.columns);
 
     // Read first (schema-only) batch to initialize result-set metadata from
     // (initial) schema before Statement.execute...(...) returns result set:
-    cursor.loadInitialSchema();
+    drillCursor.loadInitialSchema();
 
     return this;
   }
-
-
-  ////////////////////////////////////////
-  // ResultsListener:
-
-  static class ResultsListener implements UserResultsListener {
-    private static final org.slf4j.Logger logger =
-        org.slf4j.LoggerFactory.getLogger(ResultsListener.class);
-
-    private static volatile int nextInstanceId = 1;
-
-    /** (Just for logging.) */
-    private final int instanceId;
-
-    private final int batchQueueThrottlingThreshold;
-
-    /** (Just for logging.) */
-    private volatile QueryId queryId;
-
-    /** (Just for logging.) */
-    private int lastReceivedBatchNumber;
-    /** (Just for logging.) */
-    private int lastDequeuedBatchNumber;
-
-    private volatile UserException executionFailureException;
-
-    // TODO:  Revisit "completed".  Determine and document exactly what it
-    // means.  Some uses imply that it means that incoming messages indicate
-    // that the _query_ has _terminated_ (not necessarily _completing_
-    // normally), while some uses imply that it's some other state of the
-    // ResultListener.  Some uses seem redundant.)
-    volatile boolean completed = false;
-
-    /** Whether throttling of incoming data is active. */
-    private final AtomicBoolean throttled = new AtomicBoolean( false );
-    private volatile ConnectionThrottle throttle;
-
-    private volatile boolean closed = false;
-    // TODO:  Rename.  It's obvious it's a latch--but what condition or action
-    // does it represent or control?
-    private CountDownLatch latch = new CountDownLatch(1);
-    private AtomicBoolean receivedMessage = new AtomicBoolean(false);
-
-    final LinkedBlockingDeque<QueryDataBatch> batchQueue =
-        Queues.newLinkedBlockingDeque();
-
-
-    /**
-     * ...
-     * @param  batchQueueThrottlingThreshold
-     *         queue size threshold for throttling server
-     */
-    ResultsListener( int batchQueueThrottlingThreshold ) {
-      instanceId = nextInstanceId++;
-      this.batchQueueThrottlingThreshold = batchQueueThrottlingThreshold;
-      logger.debug( "[#{}] Query listener created.", instanceId );
-    }
-
-    /**
-     * Starts throttling if not currently throttling.
-     * @param  throttle  the "throttlable" object to throttle
-     * @return  true if actually started (wasn't throttling already)
-     */
-    private boolean startThrottlingIfNot( ConnectionThrottle throttle ) {
-      final boolean started = throttled.compareAndSet( false, true );
-      if ( started ) {
-        this.throttle = throttle;
-        throttle.setAutoRead(false);
-      }
-      return started;
-    }
-
-    /**
-     * Stops throttling if currently throttling.
-     * @return  true if actually stopped (was throttling)
-     */
-    private boolean stopThrottlingIfSo() {
-      final boolean stopped = throttled.compareAndSet( true, false );
-      if ( stopped ) {
-        throttle.setAutoRead(true);
-        throttle = null;
-      }
-      return stopped;
-    }
-
-    // TODO:  Doc.:  Release what if what is first relative to what?
-    private boolean releaseIfFirst() {
-      if (receivedMessage.compareAndSet(false, true)) {
-        latch.countDown();
-        return true;
-      }
-
-      return false;
-    }
-
-    @Override
-    public void queryIdArrived(QueryId queryId) {
-      logger.debug( "[#{}] Received query ID: {}.",
-                    instanceId, QueryIdHelper.getQueryId( queryId ) );
-      this.queryId = queryId;
-    }
-
-    @Override
-    public void submissionFailed(UserException ex) {
-      logger.debug( "Received query failure:", instanceId, ex );
-      this.executionFailureException = ex;
-      completed = true;
-      close();
-      logger.info( "[#{}] Query failed: ", instanceId, ex );
-    }
-
-    @Override
-    public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
-      lastReceivedBatchNumber++;
-      logger.debug( "[#{}] Received query data batch #{}: {}.",
-                    instanceId, lastReceivedBatchNumber, result );
-
-      // If we're in a closed state, just release the message.
-      if (closed) {
-        result.release();
-        // TODO:  Revisit member completed:  Is ResultListener really completed
-        // after only one data batch after being closed?
-        completed = true;
-        return;
-      }
-
-      // We're active; let's add to the queue.
-      batchQueue.add(result);
-
-      // Throttle server if queue size has exceed threshold.
-      if (batchQueue.size() > batchQueueThrottlingThreshold ) {
-        if ( startThrottlingIfNot( throttle ) ) {
-          logger.debug( "[#{}] Throttling started at queue size {}.",
-                        instanceId, batchQueue.size() );
-        }
-      }
-
-      releaseIfFirst();
-    }
-
-    @Override
-    public void queryCompleted(QueryState state) {
-      logger.debug( "[#{}] Received query completion: {}.", instanceId, state );
-      releaseIfFirst();
-      completed = true;
-    }
-
-    QueryId getQueryId() {
-      return queryId;
-    }
-
-
-    /**
-     * Gets the next batch of query results from the queue.
-     * @return  the next batch, or {@code null} after last batch has been returned
-     * @throws UserException
-     *         if the query failed
-     * @throws InterruptedException
-     *         if waiting on the queue was interrupted
-     */
-    QueryDataBatch getNext() throws UserException, InterruptedException {
-      while (true) {
-        if (executionFailureException != null) {
-          logger.debug( "[#{}] Dequeued query failure exception: {}.",
-                        instanceId, executionFailureException );
-          throw executionFailureException;
-        }
-        if (completed && batchQueue.isEmpty()) {
-          return null;
-        } else {
-          QueryDataBatch qdb = batchQueue.poll(50, TimeUnit.MILLISECONDS);
-          if (qdb != null) {
-            lastDequeuedBatchNumber++;
-            logger.debug( "[#{}] Dequeued query data batch #{}: {}.",
-                          instanceId, lastDequeuedBatchNumber, qdb );
-
-            // Unthrottle server if queue size has dropped enough below threshold:
-            if ( batchQueue.size() < batchQueueThrottlingThreshold / 2
-                 || batchQueue.size() == 0  // (in case threshold < 2)
-                 ) {
-              if ( stopThrottlingIfSo() ) {
-                logger.debug( "[#{}] Throttling stopped at queue size {}.",
-                              instanceId, batchQueue.size() );
-              }
-            }
-            return qdb;
-          }
-        }
-      }
-    }
-
-    void close() {
-      logger.debug( "[#{}] Query listener closing.", instanceId );
-      closed = true;
-      if ( stopThrottlingIfSo() ) {
-        logger.debug( "[#{}] Throttling stopped at close() (at queue size {}).",
-                      instanceId, batchQueue.size() );
-      }
-      while (!batchQueue.isEmpty()) {
-        QueryDataBatch qdb = batchQueue.poll();
-        if (qdb != null && qdb.getData() != null) {
-          qdb.getData().release();
-        }
-      }
-      // Close may be called before the first result is received and therefore
-      // when the main thread is blocked waiting for the result.  In that case
-      // we want to unblock the main thread.
-      latch.countDown(); // TODO:  Why not call releaseIfFirst as used elsewhere?
-      completed = true;
-    }
-
-  }
-
 }
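
For context on what was deleted above: the listener coordinated backpressure through an AtomicBoolean handshake plus queue-size hysteresis (start throttling once the queue exceeds the threshold, stop once it drains below half of it). A minimal, self-contained sketch of that handshake follows; the Throttle interface is a hypothetical stand-in for Drill's ConnectionThrottle, not the actual API.

    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch of the start/stop throttling handshake: compareAndSet guarantees
    // that exactly one thread flips the flag, so concurrent batches cannot
    // double-disable or double-enable server pushes.
    public class ThrottleHandshakeSketch {

      // Hypothetical stand-in for Drill's ConnectionThrottle.
      interface Throttle {
        void setAutoRead(boolean enable);
      }

      private final AtomicBoolean throttled = new AtomicBoolean(false);
      private volatile Throttle throttle;

      boolean startThrottlingIfNot(Throttle t) {
        final boolean started = throttled.compareAndSet(false, true);
        if (started) {
          throttle = t;
          t.setAutoRead(false);  // ask the server to stop pushing batches
        }
        return started;
      }

      boolean stopThrottlingIfSo() {
        final boolean stopped = throttled.compareAndSet(true, false);
        if (stopped) {
          throttle.setAutoRead(true);  // resume server pushes
          throttle = null;
        }
        return stopped;
      }

      public static void main(String[] args) {
        ThrottleHandshakeSketch s = new ThrottleHandshakeSketch();
        Throttle t = enable -> System.out.println("autoRead=" + enable);
        System.out.println(s.startThrottlingIfNot(t));  // true  (autoRead=false)
        System.out.println(s.startThrottlingIfNot(t));  // false (already throttling)
        System.out.println(s.stopThrottlingIfSo());     // true  (autoRead=true)
      }
    }

The hysteresis (threshold on the way up, half the threshold on the way down) avoids rapid on/off flapping when the queue hovers near the limit.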


[17/27] drill git commit: DRILL-5167: Send escape character for metadata queries

Posted by jn...@apache.org.
DRILL-5167: Send escape character for metadata queries

Escape character was not sent when doing metadata queries, which caused
the server to return incorrect results as the pattern is interpreted
differently from what the user asked for.

close #712
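
By way of illustration (not part of the patch): in SQL LIKE patterns '_' and '%' are wildcards, so a metadata filter built from a literal name must escape them, and the escape character has to travel with the pattern for the server to honor it. A small self-contained Java sketch of such escaping; the helper name is hypothetical and this is not the Drill client code.

    // Escapes LIKE wildcards in a literal identifier so a metadata filter
    // matches it exactly. Without an escape character sent to the server,
    // a pattern like "my_table" would also match "my1table".
    public class LikeEscapeSketch {
      static final char ESCAPE = '\\';

      static String escapeLikePattern(String literal) {
        StringBuilder sb = new StringBuilder(literal.length());
        for (char c : literal.toCharArray()) {
          if (c == '_' || c == '%' || c == ESCAPE) {
            sb.append(ESCAPE);  // prefix wildcards and the escape char itself
          }
          sb.append(c);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(escapeLikePattern("my_table"));  // my\_table
      }
    }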


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/c81f5888
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/c81f5888
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/c81f5888

Branch: refs/heads/master
Commit: c81f58884e2cef279a7d60ea611b6a422a2eb56e
Parents: d3238b1
Author: Laurent Goujon <la...@dremio.com>
Authored: Wed Dec 28 17:03:37 2016 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:33 2017 -0800

----------------------------------------------------------------------
 .../client/src/clientlib/drillClientImpl.cpp    | 27 ++++++++++++--------
 1 file changed, 16 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/c81f5888/contrib/native/client/src/clientlib/drillClientImpl.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClientImpl.cpp b/contrib/native/client/src/clientlib/drillClientImpl.cpp
index 808595c..417fe80 100644
--- a/contrib/native/client/src/clientlib/drillClientImpl.cpp
+++ b/contrib/native/client/src/clientlib/drillClientImpl.cpp
@@ -661,12 +661,16 @@ DrillClientQueryResult* DrillClientImpl::ExecuteQuery(const PreparedStatement& p
     return sendMsg(factory, ::exec::user::RUN_QUERY, query);
 }
 
+static void updateLikeFilter(exec::user::LikeFilter& likeFilter, const std::string& pattern) {
+	likeFilter.set_pattern(pattern);
+	likeFilter.set_escape(meta::DrillMetadata::s_searchEscapeString);
+}
+
 DrillClientCatalogResult* DrillClientImpl::getCatalogs(const std::string& catalogPattern,
         Metadata::pfnCatalogMetadataListener listener,
         void* listenerCtx) {
     exec::user::GetCatalogsReq query;
-    exec::user::LikeFilter* catalogFilter(query.mutable_catalog_name_filter());
-    catalogFilter->set_pattern(catalogPattern);
+    updateLikeFilter(*query.mutable_catalog_name_filter(), catalogPattern);
 
     boost::function<DrillClientCatalogResult*(int32_t)> factory = boost::bind(
             boost::factory<DrillClientCatalogResult*>(),
@@ -682,8 +686,8 @@ DrillClientSchemaResult* DrillClientImpl::getSchemas(const std::string& catalogP
         Metadata::pfnSchemaMetadataListener listener,
         void* listenerCtx) {
     exec::user::GetSchemasReq query;
-    query.mutable_catalog_name_filter()->set_pattern(catalogPattern);
-    query.mutable_schema_name_filter()->set_pattern(schemaPattern);
+    updateLikeFilter(*query.mutable_catalog_name_filter(), catalogPattern);
+    updateLikeFilter(*query.mutable_schema_name_filter(), schemaPattern);
 
     boost::function<DrillClientSchemaResult*(int32_t)> factory = boost::bind(
             boost::factory<DrillClientSchemaResult*>(),
@@ -701,9 +705,10 @@ DrillClientTableResult* DrillClientImpl::getTables(const std::string& catalogPat
         Metadata::pfnTableMetadataListener listener,
         void* listenerCtx) {
     exec::user::GetTablesReq query;
-    query.mutable_catalog_name_filter()->set_pattern(catalogPattern);
-    query.mutable_schema_name_filter()->set_pattern(schemaPattern);
-    query.mutable_table_name_filter()->set_pattern(tablePattern);
+    updateLikeFilter(*query.mutable_catalog_name_filter(), catalogPattern);
+    updateLikeFilter(*query.mutable_schema_name_filter(), schemaPattern);
+    updateLikeFilter(*query.mutable_table_name_filter(), tablePattern);
+
     if (tableTypes) {
     	std::copy(tableTypes->begin(), tableTypes->end(),
     			google::protobuf::RepeatedFieldBackInserter(query.mutable_table_type_filter()));
@@ -725,10 +730,10 @@ DrillClientColumnResult* DrillClientImpl::getColumns(const std::string& catalogP
         Metadata::pfnColumnMetadataListener listener,
         void* listenerCtx) {
     exec::user::GetColumnsReq query;
-    query.mutable_catalog_name_filter()->set_pattern(catalogPattern);
-    query.mutable_schema_name_filter()->set_pattern(schemaPattern);
-    query.mutable_table_name_filter()->set_pattern(tablePattern);
-    query.mutable_column_name_filter()->set_pattern(columnsPattern);
+    updateLikeFilter(*query.mutable_catalog_name_filter(), catalogPattern);
+    updateLikeFilter(*query.mutable_schema_name_filter(), schemaPattern);
+    updateLikeFilter(*query.mutable_table_name_filter(), tablePattern);
+    updateLikeFilter(*query.mutable_column_name_filter(), columnsPattern);
 
     boost::function<DrillClientColumnResult*(int32_t)> factory = boost::bind(
             boost::factory<DrillClientColumnResult*>(),


[22/27] drill git commit: DRILL-4963: Fix issues with dynamically loaded overloaded functions

Posted by jn...@apache.org.
DRILL-4963: Fix issues with dynamically loaded overloaded functions

close #701


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/dcbcb94f
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/dcbcb94f
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/dcbcb94f

Branch: refs/heads/master
Commit: dcbcb94fd2695edd4bbca63b2759292e99695d47
Parents: 79811db
Author: Arina Ielchiieva <ar...@gmail.com>
Authored: Tue Dec 20 16:57:15 2016 +0000
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:46:19 2017 -0800

----------------------------------------------------------------------
 .../hbase/config/HBasePersistentStore.java      |  16 +-
 .../drill/hbase/TestHBaseTableProvider.java     |   7 +-
 .../mongo/config/MongoPersistentStore.java      |  14 +-
 .../src/resources/drill-override-example.conf   |   3 +-
 .../drill/exec/coord/zk/ZookeeperClient.java    |  45 +++-
 .../exception/FunctionNotFoundException.java    |  27 ---
 .../expr/fn/FunctionImplementationRegistry.java | 219 ++++++++++++-------
 .../fn/registry/FunctionRegistryHolder.java     |  27 +--
 .../expr/fn/registry/LocalFunctionRegistry.java |  31 ++-
 .../fn/registry/RemoteFunctionRegistry.java     |  24 +-
 .../org/apache/drill/exec/ops/QueryContext.java |  11 +-
 .../exec/planner/sql/DrillOperatorTable.java    |  29 ++-
 .../drill/exec/planner/sql/DrillSqlWorker.java  |  70 +++---
 .../drill/exec/planner/sql/SqlConverter.java    |   9 -
 .../sql/handlers/CreateFunctionHandler.java     |   3 +-
 .../sql/handlers/DropFunctionHandler.java       |   3 +-
 .../exec/store/sys/BasePersistentStore.java     |   8 +-
 .../drill/exec/store/sys/PersistentStore.java   |  20 +-
 .../store/sys/store/LocalPersistentStore.java   | 188 ++++++++++------
 .../sys/store/ZookeeperPersistentStore.java     |  18 +-
 .../exec/testing/store/NoWriteLocalStore.java   |  33 ++-
 .../org/apache/drill/TestDynamicUDFSupport.java | 101 ++++++---
 .../exec/coord/zk/TestZookeeperClient.java      |  30 ++-
 .../fn/registry/FunctionRegistryHolderTest.java |  45 ++--
 .../record/ExpressionTreeMaterializerTest.java  |   7 +-
 .../jars/DrillUDF-overloading-1.0-sources.jar   | Bin 0 -> 3473 bytes
 .../resources/jars/DrillUDF-overloading-1.0.jar | Bin 0 -> 5779 bytes
 27 files changed, 639 insertions(+), 349 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/config/HBasePersistentStore.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/config/HBasePersistentStore.java b/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/config/HBasePersistentStore.java
index 2d329a8..ef6bbfe 100644
--- a/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/config/HBasePersistentStore.java
+++ b/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/config/HBasePersistentStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -67,6 +67,20 @@ public class HBasePersistentStore<V> extends BasePersistentStore<V> {
   }
 
   @Override
+  public boolean contains(String key) {
+    try {
+      Get get = new Get(row(key));
+      get.addColumn(FAMILY, QUALIFIER);
+      return hbaseTable.exists(get);
+    } catch (IOException e) {
+      throw UserException
+          .dataReadError(e)
+          .message("Caught error while checking row existence '%s' for table '%s'", key, hbaseTableName)
+          .build(logger);
+    }
+  }
+
+  @Override
   public V get(String key) {
     return get(key, FAMILY);
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseTableProvider.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseTableProvider.java b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseTableProvider.java
index 6b73283..f278359 100644
--- a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseTableProvider.java
+++ b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseTableProvider.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,6 +18,8 @@
 package org.apache.drill.hbase;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.util.Map.Entry;
 
@@ -57,6 +59,9 @@ public class TestHBaseTableProvider extends BaseHBaseTest {
     assertEquals("v0", hbaseStore.get(""));
     assertEquals("testValue", hbaseStore.get(".test"));
 
+    assertTrue(hbaseStore.contains(""));
+    assertFalse(hbaseStore.contains("unknown_key"));
+
     int rowCount = 0;
     for (Entry<String, String> entry : Lists.newArrayList(hbaseStore.getAll())) {
       rowCount++;

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/config/MongoPersistentStore.java
----------------------------------------------------------------------
diff --git a/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/config/MongoPersistentStore.java b/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/config/MongoPersistentStore.java
index b5cc3ee..73ff31d 100644
--- a/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/config/MongoPersistentStore.java
+++ b/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/config/MongoPersistentStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -61,6 +61,18 @@ public class MongoPersistentStore<V> extends BasePersistentStore<V> {
   }
 
   @Override
+  public boolean contains(String key) {
+    try {
+      Bson query = Filters.eq(DrillMongoConstants.ID, key);
+      Document document = collection.find(query).first();
+      return document != null && document.containsKey(pKey);
+    } catch (Exception e) {
+      logger.error(e.getMessage(), e);
+      throw new DrillRuntimeException(e.getMessage(), e);
+    }
+  }
+
+  @Override
   public V get(String key) {
     try {
       Bson query = Filters.eq(DrillMongoConstants.ID, key);

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/distribution/src/resources/drill-override-example.conf
----------------------------------------------------------------------
diff --git a/distribution/src/resources/drill-override-example.conf b/distribution/src/resources/drill-override-example.conf
index 43f9942..b9d09a8 100644
--- a/distribution/src/resources/drill-override-example.conf
+++ b/distribution/src/resources/drill-override-example.conf
@@ -171,8 +171,7 @@ drill.exec: {
     decode_threadpool_size: 1
   },
   debug.error_on_leak: true,
-  # Settings for Dynamic UDFs.
-  # See https://gist.github.com/arina-ielchiieva/a1c4cfa3890145c5ecb1b70a39cbff55#file-dynamicudfssupport-md.
+  # Settings for Dynamic UDFs (see https://issues.apache.org/jira/browse/DRILL-4726 for details).
   udf: {
     # number of retry attempts to update remote function registry
     # if registry version was changed during update

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/coord/zk/ZookeeperClient.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/coord/zk/ZookeeperClient.java b/exec/java-exec/src/main/java/org/apache/drill/exec/coord/zk/ZookeeperClient.java
index 610a2b9..17cb6cb 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/coord/zk/ZookeeperClient.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/coord/zk/ZookeeperClient.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -85,32 +85,53 @@ public class ZookeeperClient implements AutoCloseable {
 
   /**
    * Returns true if path exists in the cache, false otherwise.
-   *
    * Note that calls to this method are eventually consistent.
    *
-   * @param path  path to check
+   * @param path path to check
+   * @return true if path exists, false otherwise
    */
   public boolean hasPath(final String path) {
-    return hasPath(path, false);
+    return hasPath(path, false, null);
+  }
+
+  /**
+   * Returns true if path exists, false otherwise.
+   * If the consistent flag is set to true, the check is made against Zookeeper directly,
+   * else the check is done against the local cache.
+   *
+   * @param path path to check
+   * @param consistent whether the check should be consistent
+   * @return true if path exists, false otherwise
+   */
+  public boolean hasPath(final String path, final boolean consistent) {
+    return hasPath(path, consistent, null);
   }
 
   /**
    * Checks if the given path exists.
+   * If the flag consistent is set, the check is consistent as it is made against Zookeeper directly.
+   * Otherwise, the check is eventually consistent.
    *
-   * If the flag consistent is set, the check is consistent as it is made against Zookeeper directly. Otherwise,
-   * the check is eventually consistent.
+   * If consistency flag is set to true and version holder is not null, passes version holder to get data change version.
+   * Data change version is retrieved from the {@link Stat} object; it increases each time the znode data changes.
+   * Link to Zookeeper documentation - https://zookeeper.apache.org/doc/r3.2.2/zookeeperProgrammers.html#sc_zkDataModel_znodes
    *
-   * @param path  path to check
-   * @param consistent  whether the check should be consistent
-   * @return
+   * @param path path to check
+   * @param consistent whether the check should be consistent
+   * @param version version holder
+   * @return true if path exists, false otherwise
    */
-  public boolean hasPath(final String path, final boolean consistent) {
+  public boolean hasPath(final String path, final boolean consistent, final DataChangeVersion version) {
     Preconditions.checkNotNull(path, "path is required");
 
     final String target = PathUtils.join(root, path);
     try {
       if (consistent) {
-        return curator.checkExists().forPath(target) != null;
+        Stat stat = curator.checkExists().forPath(target);
+        if (version != null && stat != null) {
+          version.setVersion(stat.getVersion());
+        }
+        return stat != null;
       } else {
         return getCache().getCurrentData(target) != null;
       }
@@ -153,7 +174,7 @@ public class ZookeeperClient implements AutoCloseable {
    * @param path  target path
    * @param version version holder
    */
-  public byte[] get(final String path, DataChangeVersion version) {
+  public byte[] get(final String path, final DataChangeVersion version) {
     return get(path, true, version);
   }
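
The new three-argument hasPath pairs the existence check with the znode's data-change version, which enables optimistic check-then-act flows. A self-contained sketch of that pattern against a simple in-memory store follows; DataChangeVersion is re-declared here purely for illustration, and none of this is the Curator-backed implementation.

    import java.util.concurrent.atomic.AtomicLong;

    // Version-holder pattern: an existence check also captures the data-change
    // version it observed, and a later write is rejected if the version moved
    // in between (optimistic concurrency control).
    public class VersionCheckSketch {

      // Mirrors the holder idea only; not Drill's class.
      static class DataChangeVersion {
        private long version;
        void setVersion(long v) { version = v; }
        long getVersion() { return version; }
      }

      private final AtomicLong dataVersion = new AtomicLong(0);

      boolean contains(DataChangeVersion holder) {
        holder.setVersion(dataVersion.get());  // capture version with the check
        return true;
      }

      boolean writeIfUnchanged(DataChangeVersion holder) {
        // succeeds only if nobody changed the data since the check
        return dataVersion.compareAndSet(holder.getVersion(), holder.getVersion() + 1);
      }

      public static void main(String[] args) {
        VersionCheckSketch store = new VersionCheckSketch();
        DataChangeVersion v = new DataChangeVersion();
        store.contains(v);
        System.out.println(store.writeIfUnchanged(v));  // true: version unchanged
        System.out.println(store.writeIfUnchanged(v));  // false: version moved on
      }
    }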
 

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/exception/FunctionNotFoundException.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/exception/FunctionNotFoundException.java b/exec/java-exec/src/main/java/org/apache/drill/exec/exception/FunctionNotFoundException.java
deleted file mode 100644
index 0d59cc8..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/exception/FunctionNotFoundException.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.exception;
-
-import org.apache.drill.common.exceptions.DrillRuntimeException;
-
-public class FunctionNotFoundException extends DrillRuntimeException {
-
-  public FunctionNotFoundException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
index ce0d68b..5c7bfb4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -58,11 +58,13 @@ import org.apache.drill.exec.expr.fn.registry.RemoteFunctionRegistry;
 import org.apache.drill.exec.planner.sql.DrillOperatorTable;
 import org.apache.drill.exec.proto.UserBitShared.Jar;
 import org.apache.drill.exec.resolver.FunctionResolver;
+import org.apache.drill.exec.resolver.FunctionResolverFactory;
 import org.apache.drill.exec.server.options.OptionManager;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
+import org.apache.drill.exec.store.sys.store.DataChangeVersion;
 import org.apache.drill.exec.util.JarUtil;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -128,7 +130,7 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
 
   /**
    * Register functions in given operator table.
-   * @param operatorTable
+   * @param operatorTable operator table
    */
   public void register(DrillOperatorTable operatorTable) {
     // Register Drill functions first and move to pluggable function registries.
@@ -140,27 +142,39 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
   }
 
   /**
-   * Using the given <code>functionResolver</code>
-   * finds Drill function implementation for given <code>functionCall</code>.
-   * If function implementation was not found,
-   * loads all missing remote functions and tries to find Drill implementation one more time.
+   * First attempts to find the Drill function implementation that matches the name, arg types and return type.
+   * If exact function implementation was not found,
+   * syncs local function registry with remote function registry if needed
+   * and tries to find function implementation one more time
+   * but this time using given <code>functionResolver</code>.
+   *
+   * @param functionResolver function resolver
+   * @param functionCall function call
+   * @return best matching function holder
    */
   @Override
   public DrillFuncHolder findDrillFunction(FunctionResolver functionResolver, FunctionCall functionCall) {
-    return findDrillFunction(functionResolver, functionCall, true);
-  }
-
-  private DrillFuncHolder findDrillFunction(FunctionResolver functionResolver, FunctionCall functionCall, boolean retry) {
     AtomicLong version = new AtomicLong();
-    DrillFuncHolder holder = functionResolver.getBestMatch(
-        localFunctionRegistry.getMethods(functionReplacement(functionCall), version), functionCall);
-    if (holder == null && retry && loadRemoteFunctions(version.get())) {
-      return findDrillFunction(functionResolver, functionCall, false);
+    String newFunctionName = functionReplacement(functionCall);
+    List<DrillFuncHolder> functions = localFunctionRegistry.getMethods(newFunctionName, version);
+    FunctionResolver exactResolver = FunctionResolverFactory.getExactResolver(functionCall);
+    DrillFuncHolder holder = exactResolver.getBestMatch(functions, functionCall);
+
+    if (holder == null) {
+      syncWithRemoteRegistry(version.get());
+      List<DrillFuncHolder> updatedFunctions = localFunctionRegistry.getMethods(newFunctionName, version);
+      holder = functionResolver.getBestMatch(updatedFunctions, functionCall);
     }
+
     return holder;
   }
 
-  // Check if this Function Replacement is needed; if yes, return a new name. otherwise, return the original name
+  /**
+   * Checks if this function replacement is needed.
+   *
+   * @param functionCall function call
+   * @return new function name if replacement took place, otherwise original function name
+   */
   private String functionReplacement(FunctionCall functionCall) {
     String funcName = functionCall.getName();
       if (functionCall.args.size() > 0) {
@@ -178,22 +192,41 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
   }
 
   /**
-   * Find the Drill function implementation that matches the name, arg types and return type.
-   * If exact function implementation was not found,
-   * loads all missing remote functions and tries to find Drill implementation one more time.
+   * Finds the Drill function implementation that matches the name, arg types and return type.
+   *
+   * @param name function name
+   * @param argTypes input parameters types
+   * @param returnType function return type
+   * @return exactly matching function holder
    */
   public DrillFuncHolder findExactMatchingDrillFunction(String name, List<MajorType> argTypes, MajorType returnType) {
     return findExactMatchingDrillFunction(name, argTypes, returnType, true);
   }
 
-  private DrillFuncHolder findExactMatchingDrillFunction(String name, List<MajorType> argTypes, MajorType returnType, boolean retry) {
+  /**
+   * Finds the Drill function implementation that matches the name, arg types and return type.
+   * If exact function implementation was not found,
+   * checks if local function registry is in sync with remote function registry.
+   * If not syncs them and tries to find exact function implementation one more time
+   * but with retry flag set to false.
+   *
+   * @param name function name
+   * @param argTypes input parameters types
+   * @param returnType function return type
+   * @param retry retry on failure flag
+   * @return exactly matching function holder
+   */
+  private DrillFuncHolder findExactMatchingDrillFunction(String name,
+                                                         List<MajorType> argTypes,
+                                                         MajorType returnType,
+                                                         boolean retry) {
     AtomicLong version = new AtomicLong();
     for (DrillFuncHolder h : localFunctionRegistry.getMethods(name, version)) {
       if (h.matches(returnType, argTypes)) {
         return h;
       }
     }
-    if (retry && loadRemoteFunctions(version.get())) {
+    if (retry && syncWithRemoteRegistry(version.get())) {
       return findExactMatchingDrillFunction(name, argTypes, returnType, false);
     }
     return null;
@@ -206,8 +239,8 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
    * Note: Order of searching is same as order of {@link org.apache.drill.exec.expr.fn.PluggableFunctionRegistry}
    * implementations found on classpath.
    *
-   * @param functionCall
-   * @return
+   * @param functionCall function call
+   * @return drill function holder
    */
   @Override
   public AbstractFuncHolder findNonDrillFunction(FunctionCall functionCall) {
@@ -260,76 +293,101 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
   }
 
   /**
-   * Attempts to load and register functions from remote function registry.
-   * First checks if there is no missing jars.
-   * If yes, enters synchronized block to prevent other loading the same jars.
-   * Again re-checks if there are no missing jars in case someone has already loaded them (double-check lock).
-   * If there are still missing jars, first copies jars to local udf area and prepares {@link JarScan} for each jar.
-   * Jar registration timestamp represented in milliseconds is used as suffix.
-   * Then registers all jars at the same time. Returns true when finished.
-   * In case if any errors during jars coping or registration, logs errors and proceeds.
+   * Purpose of this method is to synchronize remote and local function registries if needed
+   * and to inform if function registry was changed after given version.
+   *
+   * To make synchronization as lightweight as possible, first only versions of both registries are checked
+   * without any locking. If synchronization is needed, enters synchronized block to prevent others from loading the same jars.
+   * The need for synchronization is checked again (double-checked lock) before comparing jars.
+   * If any missing jars are found, they are downloaded to local udf area, each is wrapped into {@link JarScan}.
+   * Once jar download is finished, all missing jars are registered in one batch.
+   * If any errors occur during jar download / registration, they are logged.
    *
-   * If no missing jars are found, checks current local registry version.
-   * Returns false if versions match, true otherwise.
+   * During registration local function registry is updated with remote function registry version it is synced with.
+   * When at least one of the missing jars failed to download / register,
+   * the local function registry version is not updated, but jars that were successfully downloaded / registered
+   * are added to local function registry.
    *
-   * @param version local function registry version
-   * @return true if new jars were registered or local function registry version is different, false otherwise
+   * If synchronization between remote and local function registry was not needed,
+   * checks if given registry version matches latest sync version
+   * to inform if function registry was changed after given version.
+   *
+   * @param version remote function registry local function registry was based on
+   * @return true if remote and local function registries were synchronized after given version
    */
-  public boolean loadRemoteFunctions(long version) {
-    List<String> missingJars = getMissingJars(remoteFunctionRegistry, localFunctionRegistry);
-    if (!missingJars.isEmpty()) {
+  public boolean syncWithRemoteRegistry(long version) {
+    if (isRegistrySyncNeeded(remoteFunctionRegistry.getRegistryVersion(), localFunctionRegistry.getVersion())) {
       synchronized (this) {
-        missingJars = getMissingJars(remoteFunctionRegistry, localFunctionRegistry);
-        if (!missingJars.isEmpty()) {
-          logger.info("Starting dynamic UDFs lazy-init process.\n" +
-              "The following jars are going to be downloaded and registered locally: " + missingJars);
+        long localRegistryVersion = localFunctionRegistry.getVersion();
+        if (isRegistrySyncNeeded(remoteFunctionRegistry.getRegistryVersion(), localRegistryVersion))  {
+          DataChangeVersion remoteVersion = new DataChangeVersion();
+          List<String> missingJars = getMissingJars(this.remoteFunctionRegistry, localFunctionRegistry, remoteVersion);
           List<JarScan> jars = Lists.newArrayList();
-          for (String jarName : missingJars) {
-            Path binary = null;
-            Path source = null;
-            URLClassLoader classLoader = null;
-            try {
-              binary = copyJarToLocal(jarName, remoteFunctionRegistry);
-              source = copyJarToLocal(JarUtil.getSourceName(jarName), remoteFunctionRegistry);
-              URL[] urls = {binary.toUri().toURL(), source.toUri().toURL()};
-              classLoader = new URLClassLoader(urls);
-              ScanResult scanResult = scan(classLoader, binary, urls);
-              localFunctionRegistry.validate(jarName, scanResult);
-              jars.add(new JarScan(jarName, scanResult, classLoader));
-            } catch (Exception e) {
-              deleteQuietlyLocalJar(binary);
-              deleteQuietlyLocalJar(source);
-              if (classLoader != null) {
-                try {
-                  classLoader.close();
-                } catch (Exception ex) {
-                  logger.warn("Problem during closing class loader for {}", jarName, e);
+          if (!missingJars.isEmpty()) {
+            logger.info("Starting dynamic UDFs lazy-init process.\n" +
+                "The following jars are going to be downloaded and registered locally: " + missingJars);
+            for (String jarName : missingJars) {
+              Path binary = null;
+              Path source = null;
+              URLClassLoader classLoader = null;
+              try {
+                binary = copyJarToLocal(jarName, this.remoteFunctionRegistry);
+                source = copyJarToLocal(JarUtil.getSourceName(jarName), this.remoteFunctionRegistry);
+                URL[] urls = {binary.toUri().toURL(), source.toUri().toURL()};
+                classLoader = new URLClassLoader(urls);
+                ScanResult scanResult = scan(classLoader, binary, urls);
+                localFunctionRegistry.validate(jarName, scanResult);
+                jars.add(new JarScan(jarName, scanResult, classLoader));
+              } catch (Exception e) {
+                deleteQuietlyLocalJar(binary);
+                deleteQuietlyLocalJar(source);
+                if (classLoader != null) {
+                  try {
+                    classLoader.close();
+                  } catch (Exception ex) {
+                    logger.warn("Problem during closing class loader for {}", jarName, e);
+                  }
                 }
+                logger.error("Problem during remote functions load from {}", jarName, e);
               }
-              logger.error("Problem during remote functions load from {}", jarName, e);
             }
           }
-          if (!jars.isEmpty()) {
-            localFunctionRegistry.register(jars);
-            return true;
-          }
+          long latestRegistryVersion = jars.size() != missingJars.size() ?
+              localRegistryVersion : remoteVersion.getVersion();
+          localFunctionRegistry.register(jars, latestRegistryVersion);
+          return true;
         }
       }
     }
+
     return version != localFunctionRegistry.getVersion();
   }
 
   /**
-   * First finds path to marker file url, otherwise throws {@link JarValidationException}.
-   * Then scans jar classes according to list indicated in marker files.
-   * Additional logic is added to close {@link URL} after {@link ConfigFactory#parseURL(URL)}.
-   * This is extremely important for Windows users where system doesn't allow to delete file if it's being used.
+   * Checks if local function registry should be synchronized with remote function registry.
+   * If remote function registry version is -1, it means that remote function registry is unreachable
+   * or is not configured, thus we skip synchronization and return false.
+   * In all other cases synchronization is needed if remote and local function registries versions do not match.
    *
-   * @param classLoader unique class loader for jar
-   * @param path local path to jar
-   * @param urls urls associated with the jar (ex: binary and source)
-   * @return scan result of packages, classes, annotations found in jar
+   * @param remoteVersion remote function registry version
+   * @param localVersion local function registry version
+   * @return true if local registry should be refreshed, false otherwise
    */
+  private boolean isRegistrySyncNeeded(long remoteVersion, long localVersion) {
+    return remoteVersion != -1 && remoteVersion != localVersion;
+  }
+
+  /**
+   * First finds path to marker file url, otherwise throws {@link JarValidationException}.
+   * Then scans jar classes according to list indicated in marker files.
+   * Additional logic is added to close {@link URL} after {@link ConfigFactory#parseURL(URL)}.
+   * This is extremely important on Windows, where the system doesn't allow deleting a file while it's in use.
+   *
+   * @param classLoader unique class loader for jar
+   * @param path local path to jar
+   * @param urls urls associated with the jar (ex: binary and source)
+   * @return scan result of packages, classes, annotations found in jar
+   */
   private ScanResult scan(ClassLoader classLoader, Path path, URL[] urls) throws IOException {
     Enumeration<URL> markerFileEnumeration = classLoader.getResources(
         CommonConstants.DRILL_JAR_MARKER_FILE_RESOURCE_PATHNAME);
@@ -355,14 +413,17 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
   /**
    * Return list of jars that are missing in local function registry
    * but present in remote function registry.
+   * Also updates version holder with remote function registry version.
    *
    * @param remoteFunctionRegistry remote function registry
    * @param localFunctionRegistry local function registry
+   * @param version holder for remote function registry version
    * @return list of missing jars
    */
   private List<String> getMissingJars(RemoteFunctionRegistry remoteFunctionRegistry,
-                                      LocalFunctionRegistry localFunctionRegistry) {
-    List<Jar> remoteJars = remoteFunctionRegistry.getRegistry().getJarList();
+                                      LocalFunctionRegistry localFunctionRegistry,
+                                      DataChangeVersion version) {
+    List<Jar> remoteJars = remoteFunctionRegistry.getRegistry(version).getJarList();
     List<String> localJars = localFunctionRegistry.getAllJarNames();
     List<String> missingJars = Lists.newArrayList();
     for (Jar jar : remoteJars) {
@@ -384,8 +445,10 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
   private Path getLocalUdfDir(DrillConfig config) {
     tmpDir = getTmpDir(config);
     File udfDir = new File(tmpDir, config.getString(ExecConstants.UDF_DIRECTORY_LOCAL));
-    udfDir.mkdirs();
     String udfPath = udfDir.getPath();
+    if (udfDir.mkdirs()) {
+      logger.debug("Local udf directory [{}] was created", udfPath);
+    }
     Preconditions.checkState(udfDir.exists(), "Local udf directory [%s] must exist", udfPath);
     Preconditions.checkState(udfDir.isDirectory(), "Local udf directory [%s] must be a directory", udfPath);
     Preconditions.checkState(udfDir.canWrite(), "Local udf directory [%s] must be writable for application user", udfPath);
@@ -404,6 +467,8 @@ public class FunctionImplementationRegistry implements FunctionLookupContext, Au
    * If value is still missing, generates directory using {@link Files#createTempDir()}.
    * If temporary directory was generated, sets {@link #deleteTmpDir} to true
    * to delete directory on drillbit exit.
+   *
+   * @param config drill config
    * @return drill temporary directory path
    */
   private File getTmpDir(DrillConfig config) {
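
The syncWithRemoteRegistry logic above is a double-checked pattern: a lock-free version comparison first, then a re-check under the lock before the expensive jar download and registration. A condensed, runnable sketch of just that control flow, with the jar work elided and all names hypothetical:

    import java.util.concurrent.atomic.AtomicLong;

    // Double-checked registry sync: the cheap version comparison runs
    // lock-free; only when versions differ do we take the lock, re-check,
    // and do the expensive work (in Drill: download and register jars).
    public class RegistrySyncSketch {
      private final AtomicLong remoteVersion = new AtomicLong(1);
      private final AtomicLong localVersion = new AtomicLong(0);

      private boolean syncNeeded() {
        long remote = remoteVersion.get();
        return remote != -1 && remote != localVersion.get();  // -1 = unreachable
      }

      public boolean syncWithRemote(long callerVersion) {
        if (syncNeeded()) {
          synchronized (this) {
            if (syncNeeded()) {  // double-check under the lock
              long remote = remoteVersion.get();
              // ... download and register missing jars here ...
              localVersion.set(remote);  // record the version we synced to
              return true;
            }
          }
        }
        // no sync happened: report whether the registry moved past the caller
        return callerVersion != localVersion.get();
      }

      public static void main(String[] args) {
        RegistrySyncSketch s = new RegistrySyncSketch();
        System.out.println(s.syncWithRemote(0));  // true: synced to version 1
        System.out.println(s.syncWithRemote(1));  // false: already in sync
      }
    }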

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolder.java
index 005c4e5..3124539 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolder.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -42,8 +42,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
  * since we expect infrequent registry changes.
  * Holder is designed to allow concurrent reads and single writes to keep data consistent.
  * This is achieved by {@link ReadWriteLock} implementation usage.
- * Holder has number version which changes every time new jars are added or removed. Initial version number is 0.
- * Also version is used when user needs data from registry with version it is based on.
+ * Holder has a version number which indicates the remote function registry version it is in sync with.
  *
  * Structure example:
  *
@@ -86,7 +85,8 @@ public class FunctionRegistryHolder {
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   private final AutoCloseableLock readLock = new AutoCloseableLock(readWriteLock.readLock());
   private final AutoCloseableLock writeLock = new AutoCloseableLock(readWriteLock.writeLock());
-  private long version = 0;
+  // remote function registry version number this holder is in sync with
+  private long version;
 
   // jar name, Map<function name, Queue<function signature>
   private final Map<String, Map<String, Queue<String>>> jars;
@@ -114,13 +114,13 @@ public class FunctionRegistryHolder {
    * If jar with the same name already exists, it and its functions will be removed.
    * Then jar will be added to {@link #jars}
    * and each function will be added using {@link #addFunctions(Map, List)}.
-   * Function version registry will be incremented by 1 if at least one jar was added but not for each jar.
+   * Registry version is updated with passed version if all jars were added successfully.
    * This is write operation, so one user at a time can call perform such action,
    * others will wait till first user completes his action.
    *
    * @param newJars jars and list of their function holders, each contains function name, signature and holder
    */
-  public void addJars(Map<String, List<FunctionHolder>> newJars) {
+  public void addJars(Map<String, List<FunctionHolder>> newJars, long version) {
     try (AutoCloseableLock lock = writeLock.open()) {
       for (Map.Entry<String, List<FunctionHolder>> newJar : newJars.entrySet()) {
         String jarName = newJar.getKey();
@@ -129,15 +129,12 @@ public class FunctionRegistryHolder {
         jars.put(jarName, jar);
         addFunctions(jar, newJar.getValue());
       }
-      if (!newJars.isEmpty()) {
-        version++;
-      }
+      this.version = version;
     }
   }
 
   /**
    * Removes jar from {@link #jars} and all associated with jar functions from {@link #functions}
-   * If jar was removed, function registry version will be incremented by 1.
    * This is write operation, so one user at a time can call perform such action,
    * others will wait till first user completes his action.
    *
@@ -145,9 +142,7 @@ public class FunctionRegistryHolder {
    */
   public void removeJar(String jarName) {
     try (AutoCloseableLock lock = writeLock.open()) {
-      if (removeAllByJar(jarName)) {
-        version++;
-      }
+      removeAllByJar(jarName);
     }
   }
 
@@ -341,12 +336,11 @@ public class FunctionRegistryHolder {
    * All jar functions have the same class loader, so we need to close only one time.
    *
    * @param jarName jar name to be removed
-   * @return true if jar was removed, false otherwise
    */
-  private boolean removeAllByJar(String jarName) {
+  private void removeAllByJar(String jarName) {
     Map<String, Queue<String>> jar = jars.remove(jarName);
     if (jar == null) {
-      return false;
+      return;
     }
 
     for (Map.Entry<String, Queue<String>> functionEntry : jar.entrySet()) {
@@ -372,6 +366,5 @@ public class FunctionRegistryHolder {
         functions.remove(function);
       }
     }
-    return true;
   }
 }
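
FunctionRegistryHolder pairs its read and write locks with try-with-resources through an AutoCloseableLock wrapper. A minimal sketch of that wrapper idea, built only on java.util.concurrent.locks (this is not Drill's actual class):

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Lock wrapper that ties acquire/release to a try-with-resources block,
    // so the unlock cannot be forgotten on any exit path.
    public class AutoCloseableLockSketch implements AutoCloseable {
      private final Lock lock;

      AutoCloseableLockSketch(Lock lock) {
        this.lock = lock;
      }

      AutoCloseableLockSketch open() {
        lock.lock();
        return this;
      }

      @Override
      public void close() {
        lock.unlock();
      }

      public static void main(String[] args) {
        ReadWriteLock rw = new ReentrantReadWriteLock();
        AutoCloseableLockSketch readLock = new AutoCloseableLockSketch(rw.readLock());
        try (AutoCloseableLockSketch ignored = readLock.open()) {
          System.out.println("read lock held");  // released automatically on exit
        }
      }
    }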

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/LocalFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/LocalFunctionRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/LocalFunctionRegistry.java
index 2a3f167..1318f72 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/LocalFunctionRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/LocalFunctionRegistry.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -73,12 +73,14 @@ public class LocalFunctionRegistry {
 
   private final FunctionRegistryHolder registryHolder;
 
-  /** Registers all functions present in Drill classpath on start-up. All functions will be marked as built-in.
-   * Built-in functions are not allowed to be unregistered. */
+  /**
+   * Registers all functions present in Drill classpath on start-up. All functions will be marked as built-in.
+   * Built-in functions are not allowed to be unregistered. Initially the sync registry version is set to 0.
+   */
   public LocalFunctionRegistry(ScanResult classpathScan) {
     registryHolder = new FunctionRegistryHolder();
     validate(BUILT_IN, classpathScan);
-    register(Lists.newArrayList(new JarScan(BUILT_IN, classpathScan, this.getClass().getClassLoader())));
+    register(Lists.newArrayList(new JarScan(BUILT_IN, classpathScan, this.getClass().getClassLoader())), 0);
     if (logger.isTraceEnabled()) {
       StringBuilder allFunctions = new StringBuilder();
       for (DrillFuncHolder method: registryHolder.getAllFunctionsWithHolders().values()) {
@@ -89,7 +91,7 @@ public class LocalFunctionRegistry {
   }
 
   /**
-   * @return local function registry version number
+   * @return remote function registry version number with which local function registry is synced
    */
   public long getVersion() {
     return registryHolder.getVersion();
@@ -147,14 +149,15 @@ public class LocalFunctionRegistry {
   }
 
   /**
-   * Registers all functions present in jar.
+   * Registers all functions present in jar and updates registry version.
    * If jar name is already registered, all jar related functions will be overridden.
    * To prevent classpath collisions during loading and unloading jars,
    * each jar is shipped with its own class loader.
    *
    * @param jars list of jars to be registered
+   * @param version remote function registry version number with which local function registry is synced
    */
-  public void register(List<JarScan> jars) {
+  public void register(List<JarScan> jars, long version) {
     Map<String, List<FunctionHolder>> newJars = Maps.newHashMap();
     for (JarScan jarScan : jars) {
       FunctionConverter converter = new FunctionConverter();
@@ -174,7 +177,7 @@ public class LocalFunctionRegistry {
         }
       }
     }
-    registryHolder.addJars(newJars);
+    registryHolder.addJars(newJars, version);
   }
 
   /**
@@ -217,25 +220,31 @@ public class LocalFunctionRegistry {
     return registryHolder.getHoldersByFunctionName(name.toLowerCase(), version);
   }
 
+  /**
+   * @param name function name
+   * @return all function holders associated with the function name. Function name is case insensitive.
+   */
   public List<DrillFuncHolder> getMethods(String name) {
     return registryHolder.getHoldersByFunctionName(name.toLowerCase());
   }
 
   /**
    * Registers all functions present in {@link DrillOperatorTable},
-   * also sets local registry version used at the moment of registering.
+   * also sets sync registry version used at the moment of function registration.
    *
    * @param operatorTable drill operator table
    */
   public void register(DrillOperatorTable operatorTable) {
     AtomicLong versionHolder = new AtomicLong();
-    final Map<String, Collection<DrillFuncHolder>> registeredFunctions = registryHolder.getAllFunctionsWithHolders(versionHolder).asMap();
+    final Map<String, Collection<DrillFuncHolder>> registeredFunctions =
+        registryHolder.getAllFunctionsWithHolders(versionHolder).asMap();
     operatorTable.setFunctionRegistryVersion(versionHolder.get());
     registerOperatorsWithInference(operatorTable, registeredFunctions);
     registerOperatorsWithoutInference(operatorTable, registeredFunctions);
   }
 
-  private void registerOperatorsWithInference(DrillOperatorTable operatorTable, Map<String, Collection<DrillFuncHolder>> registeredFunctions) {
+  private void registerOperatorsWithInference(DrillOperatorTable operatorTable,
+      Map<String, Collection<DrillFuncHolder>> registeredFunctions) {
     final Map<String, DrillSqlOperator.DrillSqlOperatorBuilder> map = Maps.newHashMap();
     final Map<String, DrillSqlAggOperator.DrillSqlAggOperatorBuilder> mapAgg = Maps.newHashMap();
     for (Entry<String, Collection<DrillFuncHolder>> function : registeredFunctions.entrySet()) {

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/RemoteFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/RemoteFunctionRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/RemoteFunctionRegistry.java
index fe79583..2e5eda2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/RemoteFunctionRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/registry/RemoteFunctionRegistry.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -106,8 +106,26 @@ public class RemoteFunctionRegistry implements AutoCloseable {
     this.retryAttempts = config.getInt(ExecConstants.UDF_RETRY_ATTEMPTS);
   }
 
-  public Registry getRegistry() {
-    return registry.get(registry_path, null);
+  /**
+   * Returns current remote function registry version.
+   * If remote function registry is not found or unreachable, logs error and returns -1.
+   *
+   * @return remote function registry version if any, -1 otherwise
+   */
+  public long getRegistryVersion() {
+    DataChangeVersion version = new DataChangeVersion();
+    boolean contains = false;
+    try {
+      contains = registry.contains(registry_path, version);
+    } catch (Exception e) {
+      logger.error("Problem during trying to access remote function registry [{}]", registry_path, e);
+    }
+    if (contains) {
+      return version.getVersion();
+    } else {
+      logger.error("Remote function registry [{}] is unreachable", registry_path);
+      return -1;
+    }
   }
 
   public Registry getRegistry(DataChangeVersion version) {

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
index 264af29..707815a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
@@ -66,7 +66,6 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
   private final UserSession session;
   private final OptionManager queryOptions;
   private final PlannerSettings plannerSettings;
-  private final DrillOperatorTable table;
   private final ExecutionControls executionControls;
 
   private final BufferAllocator allocator;
@@ -83,6 +82,7 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
    * time this is set to true and the close method becomes a no-op.
    */
   private boolean closed = false;
+  private DrillOperatorTable table;
 
   public QueryContext(final UserSession session, final DrillbitContext drillbitContext, QueryId queryId) {
     this.drillbitContext = drillbitContext;
@@ -229,6 +229,15 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
     return table;
   }
 
+  /**
+   * Re-creates drill operator table to refresh functions list from local function registry.
+   */
+  public void reloadDrillOperatorTable() {
+    table = new DrillOperatorTable(
+        drillbitContext.getFunctionImplementationRegistry(),
+        drillbitContext.getOptionManager());
+  }
+
   public QueryContextInformation getQueryContextInfo() {
     return queryContextInfo;
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
index 6e5c72b..5102ae8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -36,7 +36,7 @@ import org.apache.drill.exec.server.options.OptionManager;
 
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
+
 
 /**
  * Implementation of {@link SqlOperatorTable} that contains standard operators and functions provided through
@@ -52,8 +52,8 @@ public class DrillOperatorTable extends SqlStdOperatorTable {
 
   private final ArrayListMultimap<String, SqlOperator> drillOperatorsWithoutInferenceMap = ArrayListMultimap.create();
   private final ArrayListMultimap<String, SqlOperator> drillOperatorsWithInferenceMap = ArrayListMultimap.create();
-  // indicates local function registry version based on which drill operator were loaded
-  // is used to define if we need to reload operator table in case when function signature was not found
+  // indicates remote function registry version based on which drill operator were loaded
+  // is used to define if we need to reload operator table in case remote function registry version has changed
   private long functionRegistryVersion;
 
   private final OptionManager systemOptionManager;
@@ -65,19 +65,18 @@ public class DrillOperatorTable extends SqlStdOperatorTable {
     this.systemOptionManager = systemOptionManager;
   }
 
-  /** Cleans up all operator holders and reloads operators */
-  public void reloadOperators(FunctionImplementationRegistry registry) {
-    drillOperatorsWithoutInference.clear();
-    drillOperatorsWithInference.clear();
-    drillOperatorsWithoutInferenceMap.clear();
-    drillOperatorsWithInferenceMap.clear();
-    registry.register(this);
-  }
-
-  public long setFunctionRegistryVersion(long version) {
-    return functionRegistryVersion = version;
+  /**
+   * Set function registry version based on which operator table was loaded.
+   *
+   * @param version registry version
+   */
+  public void setFunctionRegistryVersion(long version) {
+    functionRegistryVersion = version;
   }
 
+  /**
+   * @return function registry version based on which operator table was loaded
+   */
   public long getFunctionRegistryVersion() {
     return functionRegistryVersion;
   }
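
Worth noting across the QueryContext and DrillOperatorTable hunks above: the removed reloadOperators cleared the operator table in place, while reloadDrillOperatorTable now builds a fresh table and swaps the reference, so concurrent readers never observe a half-populated table. A tiny sketch of that rebuild-and-swap idea (all names hypothetical):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Rebuild-and-swap: readers keep using the old table while a new one is
    // populated off to the side; a single volatile write publishes the
    // fully built replacement.
    public class TableSwapSketch {
      static class OperatorTable {
        final List<String> operators = new ArrayList<>();
      }

      private volatile OperatorTable table = new OperatorTable();

      OperatorTable current() {
        return table;  // readers see either the old or the new table, never a mix
      }

      void reload(List<String> freshOperators) {
        OperatorTable fresh = new OperatorTable();
        fresh.operators.addAll(freshOperators);  // populate before publishing
        table = fresh;                           // atomic publish
      }

      public static void main(String[] args) {
        TableSwapSketch sketch = new TableSwapSketch();
        sketch.reload(Arrays.asList("upper", "lower"));
        System.out.println(sketch.current().operators);  // [upper, lower]
      }
    }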

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
index 0ad3944..3bc0922 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
@@ -24,10 +24,7 @@ import org.apache.calcite.sql.parser.SqlParseException;
 import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.tools.ValidationException;
 import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.exception.FunctionNotFoundException;
-import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.ops.QueryContext;
-import org.apache.drill.exec.ops.UdfUtilities;
 import org.apache.drill.exec.physical.PhysicalPlan;
 import org.apache.drill.exec.planner.sql.handlers.AbstractSqlHandler;
 import org.apache.drill.exec.planner.sql.handlers.DefaultSqlHandler;
@@ -50,13 +47,56 @@ public class DrillSqlWorker {
   private DrillSqlWorker() {
   }
 
+  /**
+   * Converts sql query string into query physical plan.
+   *
+   * @param context query context
+   * @param sql sql query
+   * @return query physical plan
+   */
   public static PhysicalPlan getPlan(QueryContext context, String sql) throws SqlParseException, ValidationException,
       ForemanSetupException {
     return getPlan(context, sql, null);
   }
 
+  /**
+   * Converts sql query string into query physical plan.
+   * In case of any errors (which might occur due to a missing function implementation),
+   * checks if the local function registry should be synchronized with the remote function registry.
+   * If a sync took place, reloads the drill operator table
+   * (since functions were added to / removed from the local function registry)
+   * and attempts to convert the sql query string into a query physical plan one more time.
+   *
+   * @param context query context
+   * @param sql sql query
+   * @param textPlan text plan
+   * @return query physical plan
+   */
   public static PhysicalPlan getPlan(QueryContext context, String sql, Pointer<String> textPlan)
       throws ForemanSetupException {
+    Pointer<String> textPlanCopy = textPlan == null ? null : new Pointer<>(textPlan.value);
+    try {
+      return getQueryPlan(context, sql, textPlan);
+    } catch (Exception e) {
+      if (context.getFunctionRegistry().syncWithRemoteRegistry(
+          context.getDrillOperatorTable().getFunctionRegistryVersion())) {
+        context.reloadDrillOperatorTable();
+        return getQueryPlan(context, sql, textPlanCopy);
+      }
+      throw e;
+    }
+  }
+
+  /**
+   * Converts sql query string into query physical plan.
+   *
+   * @param context query context
+   * @param sql sql query
+   * @param textPlan text plan
+   * @return query physical plan
+   */
+  private static PhysicalPlan getQueryPlan(QueryContext context, String sql, Pointer<String> textPlan)
+      throws ForemanSetupException {
 
     final SqlConverter parser = new SqlConverter(context);
 
@@ -88,7 +128,7 @@ public class DrillSqlWorker {
     }
 
     try {
-      return getPhysicalPlan(handler, sqlNode, context);
+      return handler.getPlan(sqlNode);
     } catch(ValidationException e) {
       String errorMessage = e.getCause() != null ? e.getCause().getMessage() : e.getMessage();
       throw UserException.validationError(e)
@@ -104,26 +144,4 @@ public class DrillSqlWorker {
       throw new QueryInputException("Failure handling SQL.", e);
     }
   }
-
-  /**
-   * Returns query physical plan.
-   * In case of {@link FunctionNotFoundException} attempts to load remote functions.
-   * If at least one function was loaded or local function function registry version has changed,
-   * makes one more attempt to get query physical plan.
-   */
-  private static PhysicalPlan getPhysicalPlan(AbstractSqlHandler handler, SqlNode sqlNode, QueryContext context)
-      throws RelConversionException, IOException, ForemanSetupException, ValidationException {
-    try {
-      return handler.getPlan(sqlNode);
-    } catch (FunctionNotFoundException e) {
-      DrillOperatorTable drillOperatorTable = context.getDrillOperatorTable();
-      FunctionImplementationRegistry functionRegistry = context.getFunctionRegistry();
-      if (functionRegistry.loadRemoteFunctions(drillOperatorTable.getFunctionRegistryVersion())) {
-        drillOperatorTable.reloadOperators(functionRegistry);
-        return handler.getPlan(sqlNode);
-      }
-      throw e;
-    }
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
index e9085f7..845848c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
@@ -49,17 +49,13 @@ import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.sql.util.ChainedSqlOperatorTable;
 import org.apache.calcite.sql.validate.SqlConformance;
 import org.apache.calcite.sql.validate.SqlValidatorCatalogReader;
-import org.apache.calcite.sql.validate.SqlValidatorException;
 import org.apache.calcite.sql.validate.SqlValidatorImpl;
 import org.apache.calcite.sql.validate.SqlValidatorScope;
 import org.apache.calcite.sql2rel.RelDecorrelator;
 import org.apache.calcite.sql2rel.SqlToRelConverter;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.exception.FunctionNotFoundException;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.ops.QueryContext;
 import org.apache.drill.exec.ops.UdfUtilities;
@@ -173,11 +169,6 @@ public class SqlConverter {
       SqlNode validatedNode = validator.validate(parsedNode);
       return validatedNode;
     } catch (RuntimeException e) {
-      final Throwable rootCause = ExceptionUtils.getRootCause(e);
-      if (rootCause instanceof SqlValidatorException
-          && StringUtils.contains(rootCause.getMessage(), "No match found for function signature")) {
-        throw new FunctionNotFoundException(rootCause.getMessage(), e);
-      }
       UserException.Builder builder = UserException
           .validationError(e)
           .addContext("SQL Query", sql);

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateFunctionHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateFunctionHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateFunctionHandler.java
index 48bfd8b..0902fb7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateFunctionHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/CreateFunctionHandler.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -186,6 +186,7 @@ public class CreateFunctionHandler extends DefaultSqlHandler {
           remoteRegistry.updateRegistry(updatedRegistry, version);
           return;
         } catch (VersionMismatchException ex) {
+          logger.debug("Failed to update function registry during registration, version mismatch was detected.", ex);
           retryAttempts--;
         }
       }

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropFunctionHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropFunctionHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropFunctionHandler.java
index 6e2801a..b5d0b23 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropFunctionHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DropFunctionHandler.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -142,6 +142,7 @@ public class DropFunctionHandler extends DefaultSqlHandler {
         remoteFunctionRegistry.updateRegistry(updatedRegistry, version);
         return jarToBeDeleted;
       } catch (VersionMismatchException ex) {
+        logger.debug("Failed to update function registry during unregistration, version mismatch was detected.", ex);
         retryAttempts--;
       }
     }
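
Both CreateFunctionHandler and DropFunctionHandler wrap their registry updates in the
same optimistic loop: read the current value together with its version, write back with
that version, and retry on VersionMismatchException when a concurrent writer got there
first. A condensed sketch of the pattern against the PersistentStore API follows; the
helper class, its key and the transform function are illustrative stand-ins, not Drill
code.

import com.google.common.base.Function;
import org.apache.drill.exec.exception.VersionMismatchException;
import org.apache.drill.exec.store.sys.PersistentStore;
import org.apache.drill.exec.store.sys.store.DataChangeVersion;

public final class OptimisticUpdate {
  // Hypothetical helper distilling the retry loop; not part of Drill's API.
  public static <V> void updateWithRetry(PersistentStore<V> store, String key,
                                         Function<V, V> transform, int retryAttempts) {
    while (retryAttempts-- > 0) {
      DataChangeVersion version = new DataChangeVersion();
      V current = store.get(key, version);                 // reads the value and its version
      try {
        store.put(key, transform.apply(current), version); // rejected if the version moved on
        return;                                            // success, no concurrent writer
      } catch (VersionMismatchException e) {
        // a concurrent writer won the race; loop around to re-read and retry
      }
    }
    throw new IllegalStateException("Unable to update '" + key + "', too many version conflicts");
  }
}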

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/BasePersistentStore.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/BasePersistentStore.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/BasePersistentStore.java
index ea38278..0640407 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/BasePersistentStore.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/BasePersistentStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -29,6 +29,12 @@ public abstract class BasePersistentStore<V> implements PersistentStore<V> {
     return getRange(0, Integer.MAX_VALUE);
   }
 
+  /** By default contains with version will behave the same way as without version.
+   * Override this method to add version support. */
+  public boolean contains(String key, DataChangeVersion version) {
+    return contains(key);
+  }
+
   /** By default get with version will behave the same way as without version.
    * Override this method to add version support. */
   @Override

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStore.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStore.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStore.java
index bb23752..206642a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStore.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -34,6 +34,24 @@ public interface PersistentStore<V> extends AutoCloseable {
   PersistentStoreMode getMode();
 
   /**
+   * Checks if lookup key is present in store.
+   *
+   * @param key lookup key
+   * @return true if store contains lookup key, false otherwise
+   */
+  boolean contains(String key);
+
+  /**
+   * Checks if lookup key is present in store.
+   * Sets data change version number.
+   *
+   * @param key lookup key
+   * @param version version holder
+   * @return true if store contains lookup key, false otherwise
+   */
+  boolean contains(String key, DataChangeVersion version);
+
+  /**
    * Returns the value for the given key if exists, null otherwise.
    * @param key  lookup key
    */
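
The versioned contains() exists for check-then-act callers: the check records the store
version it observed, and a later versioned put() fails with VersionMismatchException if
anything changed in between. A hedged sketch, where the key and initial value are
placeholders:

// Sketch: detect a concurrent modification between an existence check and a write.
DataChangeVersion observed = new DataChangeVersion();
if (!store.contains("registry", observed)) {        // records the version seen at check time
  try {
    store.put("registry", initialValue, observed);  // fails if the store changed since the check
  } catch (VersionMismatchException e) {
    // the entry was created or modified concurrently; re-read before deciding what to do
  }
}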

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
index b9a4b59..ef855e2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,12 +28,16 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.Nullable;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.drill.common.collections.ImmutableEntry;
+import org.apache.drill.common.concurrent.AutoCloseableLock;
 import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.exec.exception.VersionMismatchException;
 import org.apache.drill.exec.store.dfs.DrillFileSystem;
 import org.apache.drill.exec.store.sys.BasePersistentStore;
 import org.apache.drill.exec.store.sys.PersistentStoreConfig;
@@ -47,13 +51,20 @@ import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class LocalPersistentStore<V> extends BasePersistentStore<V> {
-//  private static final Logger logger = LoggerFactory.getLogger(LocalPersistentStore.class);
+  private static final Logger logger = LoggerFactory.getLogger(LocalPersistentStore.class);
+
+  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+  private final AutoCloseableLock readLock = new AutoCloseableLock(readWriteLock.readLock());
+  private final AutoCloseableLock writeLock = new AutoCloseableLock(readWriteLock.writeLock());
 
   private final Path basePath;
   private final PersistentStoreConfig<V> config;
   private final DrillFileSystem fs;
+  private int version = -1;
 
   public LocalPersistentStore(DrillFileSystem fs, Path base, PersistentStoreConfig<V> config) {
     super();
@@ -62,7 +73,9 @@ public class LocalPersistentStore<V> extends BasePersistentStore<V> {
     this.fs = fs;
 
     try {
-      mkdirs(basePath);
+      if (!fs.mkdirs(basePath)) {
+        version++;
+      }
     } catch (IOException e) {
       throw new RuntimeException("Failure setting pstore configuration path.");
     }
@@ -73,11 +86,7 @@ public class LocalPersistentStore<V> extends BasePersistentStore<V> {
     return PersistentStoreMode.PERSISTENT;
   }
 
-  private void mkdirs(Path path) throws IOException{
-    fs.mkdirs(path);
-  }
-
-  public static Path getLogDir(){
+  public static Path getLogDir() {
     String drillLogDir = System.getenv("DRILL_LOG_DIR");
     if (drillLogDir == null) {
       drillLogDir = "/var/log/drill";
@@ -85,10 +94,10 @@ public class LocalPersistentStore<V> extends BasePersistentStore<V> {
     return new Path(new File(drillLogDir).getAbsoluteFile().toURI());
   }
 
-  public static DrillFileSystem getFileSystem(DrillConfig config, Path root) throws IOException{
+  public static DrillFileSystem getFileSystem(DrillConfig config, Path root) throws IOException {
     Path blobRoot = root == null ? getLogDir() : root;
     Configuration fsConf = new Configuration();
-    if(blobRoot.toUri().getScheme() != null){
+    if (blobRoot.toUri().getScheme() != null) {
       fsConf.set(FileSystem.FS_DEFAULT_NAME_KEY, blobRoot.toUri().toString());
     }
 
@@ -100,93 +109,142 @@ public class LocalPersistentStore<V> extends BasePersistentStore<V> {
 
   @Override
   public Iterator<Map.Entry<String, V>> getRange(int skip, int take) {
-    try{
-      List<FileStatus> f = fs.list(false, basePath);
-      if (f == null || f.isEmpty()) {
-        return Collections.emptyIterator();
-      }
-      List<String> files = Lists.newArrayList();
-
-      for (FileStatus stat : f) {
-        String s = stat.getPath().getName();
-        if (s.endsWith(DRILL_SYS_FILE_SUFFIX)) {
-          files.add(s.substring(0, s.length() - DRILL_SYS_FILE_SUFFIX.length()));
+    try (AutoCloseableLock lock = readLock.open()) {
+      try {
+        List<FileStatus> f = fs.list(false, basePath);
+        if (f == null || f.isEmpty()) {
+          return Collections.emptyIterator();
         }
-      }
+        List<String> files = Lists.newArrayList();
 
-      Collections.sort(files);
-      return Iterables.transform(Iterables.limit(Iterables.skip(files, skip), take), new Function<String, Entry<String, V>>() {
-        @Nullable
-        @Override
-        public Entry<String, V> apply(String key) {
-          return new ImmutableEntry<>(key, get(key));
+        for (FileStatus stat : f) {
+          String s = stat.getPath().getName();
+          if (s.endsWith(DRILL_SYS_FILE_SUFFIX)) {
+            files.add(s.substring(0, s.length() - DRILL_SYS_FILE_SUFFIX.length()));
+          }
         }
-      }).iterator();
-    }catch(IOException e){
-      throw new RuntimeException(e);
+
+        Collections.sort(files);
+        return Iterables.transform(Iterables.limit(Iterables.skip(files, skip), take), new Function<String, Entry<String, V>>() {
+          @Nullable
+          @Override
+          public Entry<String, V> apply(String key) {
+            return new ImmutableEntry<>(key, get(key));
+          }
+        }).iterator();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
     }
   }
 
   private Path makePath(String name) {
     Preconditions.checkArgument(
         !name.contains("/") &&
-        !name.contains(":") &&
-        !name.contains(".."));
+            !name.contains(":") &&
+            !name.contains(".."));
+    return new Path(basePath, name + DRILL_SYS_FILE_SUFFIX);
+  }
 
-    final Path path = new Path(basePath, name + DRILL_SYS_FILE_SUFFIX);
-    // do this to check file name.
-    return path;
+  @Override
+  public boolean contains(String key) {
+    return contains(key, null);
   }
 
   @Override
-  public V get(String key) {
-    try{
-      Path path = makePath(key);
-      if(!fs.exists(path)){
-        return null;
+  public boolean contains(String key, DataChangeVersion dataChangeVersion) {
+    try (AutoCloseableLock lock = readLock.open()) {
+      try {
+        Path path = makePath(key);
+        boolean exists = fs.exists(path);
+        if (exists && dataChangeVersion != null) {
+          dataChangeVersion.setVersion(version);
+        }
+        return exists;
+      } catch (IOException e) {
+        throw new RuntimeException(e);
       }
-    }catch(IOException e){
-      throw new RuntimeException(e);
     }
+  }
 
-    final Path path = makePath(key);
-    try (InputStream is = fs.open(path)) {
-      return config.getSerializer().deserialize(IOUtils.toByteArray(is));
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to deserialize \"" + path + "\"", e);
+  @Override
+  public V get(String key) {
+    return get(key, null);
+  }
+
+  @Override
+  public V get(String key, DataChangeVersion dataChangeVersion) {
+    try (AutoCloseableLock lock = readLock.open()) {
+      try {
+        if (dataChangeVersion != null) {
+          dataChangeVersion.setVersion(version);
+        }
+        Path path = makePath(key);
+        if (!fs.exists(path)) {
+          return null;
+        }
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+      final Path path = makePath(key);
+      try (InputStream is = fs.open(path)) {
+        return config.getSerializer().deserialize(IOUtils.toByteArray(is));
+      } catch (IOException e) {
+        throw new RuntimeException("Unable to deserialize \"" + path + "\"", e);
+      }
     }
   }
 
   @Override
   public void put(String key, V value) {
-    try (OutputStream os = fs.create(makePath(key))) {
-      IOUtils.write(config.getSerializer().serialize(value), os);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
+    put(key, value, null);
+  }
+
+  @Override
+  public void put(String key, V value, DataChangeVersion dataChangeVersion) {
+    try (AutoCloseableLock lock = writeLock.open()) {
+      if (dataChangeVersion != null && dataChangeVersion.getVersion() != version) {
+        throw new VersionMismatchException("Version mismatch detected", dataChangeVersion.getVersion());
+      }
+      try (OutputStream os = fs.create(makePath(key))) {
+        IOUtils.write(config.getSerializer().serialize(value), os);
+        version++;
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
     }
   }
 
   @Override
   public boolean putIfAbsent(String key, V value) {
-    try {
-      Path p = makePath(key);
-      if (fs.exists(p)) {
-        return false;
-      } else {
-        put(key, value);
-        return true;
+    try (AutoCloseableLock lock = writeLock.open()) {
+      try {
+        Path p = makePath(key);
+        if (fs.exists(p)) {
+          return false;
+        } else {
+          try (OutputStream os = fs.create(makePath(key))) {
+            IOUtils.write(config.getSerializer().serialize(value), os);
+            version++;
+          }
+          return true;
+        }
+      } catch (IOException e) {
+        throw new RuntimeException(e);
       }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
     }
   }
 
   @Override
   public void delete(String key) {
-    try {
-      fs.delete(makePath(key), false);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
+    try (AutoCloseableLock lock = writeLock.open()) {
+      try {
+        fs.delete(makePath(key), false);
+        version++;
+      } catch (IOException e) {
+        logger.error("Unable to delete data from storage.", e);
+        throw new RuntimeException(e);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/ZookeeperPersistentStore.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/ZookeeperPersistentStore.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/ZookeeperPersistentStore.java
index 55f72c9..a3ee58e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/ZookeeperPersistentStore.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/ZookeeperPersistentStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -62,16 +62,26 @@ public class ZookeeperPersistentStore<V> extends BasePersistentStore<V> {
   }
 
   @Override
+  public boolean contains(final String key) {
+    return contains(key, null);
+  }
+
+  @Override
+  public boolean contains(final String key, final DataChangeVersion version) {
+    return client.hasPath(key, true, version);
+  }
+
+  @Override
   public V get(final String key) {
     return get(key, false, null);
   }
 
   @Override
-  public V get(final String key, DataChangeVersion version) {
+  public V get(final String key, final DataChangeVersion version) {
     return get(key, true, version);
   }
 
-  public V get(final String key, boolean consistencyFlag, DataChangeVersion version) {
+  public V get(final String key, final boolean consistencyFlag, final DataChangeVersion version) {
     byte[] bytes = client.get(key, consistencyFlag, version);
 
     if (bytes == null) {
@@ -90,7 +100,7 @@ public class ZookeeperPersistentStore<V> extends BasePersistentStore<V> {
   }
 
   @Override
-  public void put(final String key, final V value, DataChangeVersion version) {
+  public void put(final String key, final V value, final DataChangeVersion version) {
     final InstanceSerializer<V> serializer = config.getSerializer();
     try {
       final byte[] bytes = serializer.serialize(value);

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/main/java/org/apache/drill/exec/testing/store/NoWriteLocalStore.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/testing/store/NoWriteLocalStore.java b/exec/java-exec/src/main/java/org/apache/drill/exec/testing/store/NoWriteLocalStore.java
index 58ec3ea..e36dc83 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/testing/store/NoWriteLocalStore.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/testing/store/NoWriteLocalStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,8 +20,6 @@ package org.apache.drill.exec.testing.store;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -38,13 +36,13 @@ public class NoWriteLocalStore<V> extends BasePersistentStore<V> {
   private final AutoCloseableLock readLock = new AutoCloseableLock(readWriteLock.readLock());
   private final AutoCloseableLock writeLock = new AutoCloseableLock(readWriteLock.writeLock());
   private final ConcurrentMap<String, V> store = Maps.newConcurrentMap();
-  private final AtomicInteger version = new AtomicInteger();
+  private int version = -1;
 
   @Override
   public void delete(final String key) {
     try (AutoCloseableLock lock = writeLock.open()) {
       store.remove(key);
-      version.incrementAndGet();
+      version++;
     }
   }
 
@@ -54,6 +52,21 @@ public class NoWriteLocalStore<V> extends BasePersistentStore<V> {
   }
 
   @Override
+  public boolean contains(final String key) {
+    return contains(key, null);
+  }
+
+  @Override
+  public boolean contains(final String key, final DataChangeVersion dataChangeVersion) {
+    try (AutoCloseableLock lock = readLock.open()) {
+      if (dataChangeVersion != null) {
+        dataChangeVersion.setVersion(version);
+      }
+      return store.containsKey(key);
+    }
+  }
+
+  @Override
   public V get(final String key) {
     return get(key, null);
   }
@@ -62,7 +75,7 @@ public class NoWriteLocalStore<V> extends BasePersistentStore<V> {
   public V get(final String key, final DataChangeVersion dataChangeVersion) {
     try (AutoCloseableLock lock = readLock.open()) {
       if (dataChangeVersion != null) {
-        dataChangeVersion.setVersion(version.get());
+        dataChangeVersion.setVersion(version);
       }
       return store.get(key);
     }
@@ -76,11 +89,11 @@ public class NoWriteLocalStore<V> extends BasePersistentStore<V> {
   @Override
   public void put(final String key, final V value, final DataChangeVersion dataChangeVersion) {
     try (AutoCloseableLock lock = writeLock.open()) {
-      if (dataChangeVersion != null && dataChangeVersion.getVersion() != version.get()) {
+      if (dataChangeVersion != null && dataChangeVersion.getVersion() != version) {
         throw new VersionMismatchException("Version mismatch detected", dataChangeVersion.getVersion());
       }
       store.put(key, value);
-      version.incrementAndGet();
+      version++;
     }
   }
 
@@ -89,7 +102,7 @@ public class NoWriteLocalStore<V> extends BasePersistentStore<V> {
     try (AutoCloseableLock lock = writeLock.open()) {
       final V old = store.putIfAbsent(key, value);
       if (old == null) {
-        version.incrementAndGet();
+        version++;
         return true;
       }
       return false;
@@ -107,7 +120,7 @@ public class NoWriteLocalStore<V> extends BasePersistentStore<V> {
   public void close() throws Exception {
     try (AutoCloseableLock lock = writeLock.open()) {
       store.clear();
-      version.set(0);
+      version = -1;
     }
   }
 }


[15/27] drill git commit: DRILL-5301: Server metadata API

Posted by jn...@apache.org.
DRILL-5301: Server metadata API

Add a Server metadata API to the User protocol, to query server support
of various SQL features.

Add support to the client (DrillClient) to query this information.

Add support to the JDBC driver to query this information if the server supports
the new API, or fall back to the previous behaviour (rely on Avatica defaults) otherwise.

close #764


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/d2e0f415
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/d2e0f415
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/d2e0f415

Branch: refs/heads/master
Commit: d2e0f415c9cbaf609805708586832b6771883f53
Parents: 17f888d
Author: Laurent Goujon <la...@dremio.com>
Authored: Fri Feb 24 15:41:07 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:32 2017 -0800

----------------------------------------------------------------------
 .../apache/drill/exec/client/DrillClient.java   |    15 +
 .../apache/drill/exec/client/ServerMethod.java  |     9 +-
 .../exec/planner/sql/DrillParserConfig.java     |    65 +
 .../drill/exec/planner/sql/SqlConverter.java    |    53 +-
 .../drill/exec/resolver/TypeCastRules.java      |    67 +-
 .../apache/drill/exec/rpc/user/UserClient.java  |     3 +
 .../drill/exec/rpc/user/UserRpcConfig.java      |     5 +-
 .../drill/exec/rpc/user/UserRpcUtils.java       |    28 +-
 .../apache/drill/exec/rpc/user/UserServer.java  |    16 +-
 .../exec/rpc/user/UserServerRequestHandler.java |    20 +-
 .../exec/work/metadata/MetadataProvider.java    |     2 +-
 .../exec/work/metadata/ServerMetaProvider.java  |   168 +
 .../apache/drill/exec/work/user/UserWorker.java |     7 +
 .../work/metadata/TestServerMetaProvider.java   |    48 +
 .../drill/jdbc/DrillConnectionConfig.java       |     5 +-
 .../jdbc/impl/DrillDatabaseMetaDataImpl.java    |   456 +-
 .../drill/jdbc/impl/DrillJdbc41Factory.java     |     4 +-
 .../apache/drill/jdbc/impl/DrillMetaImpl.java   |     8 +-
 .../common/expression/fn/CastFunctions.java     |     3 +-
 .../drill/exec/proto/SchemaUserProtos.java      |   800 +
 .../org/apache/drill/exec/proto/UserProtos.java | 47067 ++++++++++-------
 .../drill/exec/proto/beans/CollateSupport.java  |    49 +
 .../drill/exec/proto/beans/ConvertSupport.java  |   199 +
 .../proto/beans/CorrelationNamesSupport.java    |    51 +
 .../proto/beans/DateTimeLiteralsSupport.java    |    79 +
 .../exec/proto/beans/GetServerMetaResp.java     |   211 +
 .../drill/exec/proto/beans/GroupBySupport.java  |    53 +
 .../exec/proto/beans/IdentifierCasing.java      |    55 +
 .../drill/exec/proto/beans/NullCollation.java   |    55 +
 .../drill/exec/proto/beans/OrderBySupport.java  |    51 +
 .../exec/proto/beans/OuterJoinSupport.java      |    61 +
 .../apache/drill/exec/proto/beans/RpcType.java  |     8 +-
 .../drill/exec/proto/beans/ServerMeta.java      |  1319 +
 .../drill/exec/proto/beans/SubQuerySupport.java |    57 +
 .../drill/exec/proto/beans/UnionSupport.java    |    51 +
 protocol/src/main/protobuf/User.proto           |   219 +-
 36 files changed, 33484 insertions(+), 17883 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
index 0b5bf30..0ff6a5b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
@@ -64,6 +64,8 @@ import org.apache.drill.exec.proto.UserProtos.GetColumnsResp;
 import org.apache.drill.exec.proto.UserProtos.GetQueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.GetSchemasReq;
 import org.apache.drill.exec.proto.UserProtos.GetSchemasResp;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaReq;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaResp;
 import org.apache.drill.exec.proto.UserProtos.GetTablesReq;
 import org.apache.drill.exec.proto.UserProtos.GetTablesResp;
 import org.apache.drill.exec.proto.UserProtos.LikeFilter;
@@ -509,6 +511,19 @@ public class DrillClient implements Closeable, ConnectionThrottle {
   public Version getServerVersion() {
     return (client != null && client.getServerInfos() != null) ? UserRpcUtils.getVersion(client.getServerInfos()) : null;
   }
+
+  /**
+   * Get server meta information
+   *
+   * Get meta information about the server, like the available functions
+   * or the identifier quoting string used by the current session.
+   *
+   * @return a future to the server meta response
+   */
+  public DrillRpcFuture<GetServerMetaResp> getServerMeta() {
+    return client.send(RpcType.GET_SERVER_META, GetServerMetaReq.getDefaultInstance(), GetServerMetaResp.class);
+  }
+
   /**
    * Returns the list of methods supported by the server based on its advertised information.
    *
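
Callers block on the returned DrillRpcFuture to obtain the metadata. The sketch below is
a hedged illustration: the status and payload accessors on GetServerMetaResp
(getStatus(), getServerMeta()) and the quote-string getter on ServerMeta follow the
usual protobuf naming and are assumptions here, not verified API.

// Hedged sketch of querying server metadata from a connected client
// (exception handling elided for brevity).
DrillClient client = new DrillClient();
client.connect();
GetServerMetaResp resp = client.getServerMeta().checkedGet();  // blocks for the reply
if (resp.getStatus() == RequestStatus.OK) {                    // assumed status accessor
  ServerMeta meta = resp.getServerMeta();                      // assumed payload accessor
  System.out.println("Identifier quote string: " + meta.getIdentifierQuoteString());
}
client.close();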

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java
index 5c6640d..5896df2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java
@@ -80,12 +80,17 @@ public enum ServerMethod {
   /**
    * Get columns metadata
    */
-  GET_COLUMNS(RpcType.GET_COLUMNS, Constants.DRILL_1_8_0);
+  GET_COLUMNS(RpcType.GET_COLUMNS, Constants.DRILL_1_8_0),
+
+  /**
+   * Get server metadata
+   */
+  SERVER_META(RpcType.SERVER_META, Constants.DRILL_1_10_0);
 
   private static class Constants {
     private static final Version DRILL_0_0_0 = new Version("0.0.0", 0, 0, 0, 0, "");
-
     private static final Version DRILL_1_8_0 = new Version("1.8.0", 1, 8, 0, 0, "");
+    private static final Version DRILL_1_10_0 = new Version("1.10.0", 1, 10, 0, 0, "");
   }
 
   private static final Map<RpcType, ServerMethod> REVERSE_MAPPING;

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillParserConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillParserConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillParserConfig.java
new file mode 100644
index 0000000..7e7b140
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillParserConfig.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql;
+
+import org.apache.calcite.avatica.util.Casing;
+import org.apache.calcite.avatica.util.Quoting;
+import org.apache.calcite.sql.parser.SqlParser;
+import org.apache.calcite.sql.parser.SqlParserImplFactory;
+import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.apache.drill.exec.planner.sql.parser.impl.DrillParserWithCompoundIdConverter;
+
+public class DrillParserConfig implements SqlParser.Config {
+
+  private final long identifierMaxLength;
+
+  public DrillParserConfig(PlannerSettings settings) {
+    identifierMaxLength = settings.getIdentifierMaxLength();
+  }
+
+  @Override
+  public int identifierMaxLength() {
+    return (int) identifierMaxLength;
+  }
+
+  @Override
+  public Casing quotedCasing() {
+    return Casing.UNCHANGED;
+  }
+
+  @Override
+  public Casing unquotedCasing() {
+    return Casing.UNCHANGED;
+  }
+
+  @Override
+  public Quoting quoting() {
+    return Quoting.BACK_TICK;
+  }
+
+  @Override
+  public boolean caseSensitive() {
+    return false;
+  }
+
+  @Override
+  public SqlParserImplFactory parserFactory() {
+    return DrillParserWithCompoundIdConverter.FACTORY;
+  }
+
+}
\ No newline at end of file
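
Extracting the parser configuration into a public class lets other components build a
Calcite parser with Drill's dialect settings. A hedged usage sketch; SqlParser.create
is Calcite's standard entry point, and the PlannerSettings instance passed as `settings`
is assumed to be available from the planner context:

import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParser;

// Sketch: parse a statement with Drill's dialect (back-tick quoting,
// casing preserved, case-insensitive name matching).
SqlParser parser = SqlParser.create("SELECT `n_name` FROM nation",
    new DrillParserConfig(settings));
SqlNode ast = parser.parseStmt();   // throws SqlParseException on invalid input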

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
index 3e3226d..e9085f7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
@@ -21,11 +21,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
 import org.apache.calcite.adapter.java.JavaTypeFactory;
-import org.apache.calcite.avatica.util.Casing;
-import org.apache.calcite.avatica.util.Quoting;
 import org.apache.calcite.jdbc.CalciteSchema;
 import org.apache.calcite.jdbc.CalciteSchemaImpl;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
@@ -48,7 +44,6 @@ import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlOperatorTable;
 import org.apache.calcite.sql.parser.SqlParseException;
 import org.apache.calcite.sql.parser.SqlParser;
-import org.apache.calcite.sql.parser.SqlParserImplFactory;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.sql.util.ChainedSqlOperatorTable;
@@ -63,8 +58,8 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.exception.FunctionNotFoundException;
 import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.exception.FunctionNotFoundException;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.ops.QueryContext;
 import org.apache.drill.exec.ops.UdfUtilities;
@@ -72,10 +67,11 @@ import org.apache.drill.exec.planner.cost.DrillCostBase;
 import org.apache.drill.exec.planner.logical.DrillConstExecutor;
 import org.apache.drill.exec.planner.physical.DrillDistributionTraitDef;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
-import org.apache.drill.exec.planner.sql.parser.impl.DrillParserWithCompoundIdConverter;
+import org.apache.drill.exec.rpc.user.UserSession;
 
 import com.google.common.base.Joiner;
-import org.apache.drill.exec.rpc.user.UserSession;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 /**
  * Class responsible for managing parsing, validation and toRel conversion for sql statements.
@@ -109,9 +105,9 @@ public class SqlConverter {
 
   public SqlConverter(QueryContext context) {
     this.settings = context.getPlannerSettings();
-    this.util = (UdfUtilities) context;
+    this.util = context;
     this.functions = context.getFunctionRegistry();
-    this.parserConfig = new ParserConfig();
+    this.parserConfig = new DrillParserConfig(settings);
     this.sqlToRelConverterConfig = new SqlToRelConverterConfig();
     this.isInnerQuery = false;
     this.typeFactory = new JavaTypeFactoryImpl(DRILL_TYPE_SYSTEM);
@@ -290,6 +286,7 @@ public class SqlConverter {
     public Expander() {
     }
 
+    @Override
     public RelNode expandView(RelDataType rowType, String queryString, List<String> schemaPath) {
       final DrillCalciteCatalogReader catalogReader = new DrillCalciteCatalogReader(
           CalciteSchemaImpl.from(rootSchema),
@@ -341,42 +338,6 @@ public class SqlConverter {
 
   }
 
-  private class ParserConfig implements SqlParser.Config {
-
-    final long identifierMaxLength = settings.getIdentifierMaxLength();
-
-    @Override
-    public int identifierMaxLength() {
-      return (int) identifierMaxLength;
-    }
-
-    @Override
-    public Casing quotedCasing() {
-      return Casing.UNCHANGED;
-    }
-
-    @Override
-    public Casing unquotedCasing() {
-      return Casing.UNCHANGED;
-    }
-
-    @Override
-    public Quoting quoting() {
-      return Quoting.BACK_TICK;
-    }
-
-    @Override
-    public boolean caseSensitive() {
-      return false;
-    }
-
-    @Override
-    public SqlParserImplFactory parserFactory() {
-      return DrillParserWithCompoundIdConverter.FACTORY;
-    }
-
-  }
-
   private class SqlToRelConverterConfig implements SqlToRelConverter.Config {
 
     final int inSubqueryThreshold = (int)settings.getInSubqueryThreshold();

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/resolver/TypeCastRules.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/resolver/TypeCastRules.java b/exec/java-exec/src/main/java/org/apache/drill/exec/resolver/TypeCastRules.java
index 8bb6c2a..78a4509 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/resolver/TypeCastRules.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/resolver/TypeCastRules.java
@@ -24,10 +24,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import org.apache.drill.common.expression.MajorTypeInLogicalExpression;
 import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.MajorTypeInLogicalExpression;
 import org.apache.drill.common.types.TypeProtos.DataMode;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.TypeProtos.MinorType;
@@ -35,6 +33,9 @@ import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.fn.DrillFuncHolder;
 import org.apache.drill.exec.util.DecimalUtility;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
 public class TypeCastRules {
 
   private static Map<MinorType, Set<MinorType>> rules;
@@ -47,12 +48,12 @@ public class TypeCastRules {
   }
 
   private static void initTypeRules() {
-    rules = new HashMap<MinorType, Set<MinorType>>();
+    rules = new HashMap<>();
 
     Set<MinorType> rule;
 
     /** TINYINT cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -80,7 +81,7 @@ public class TypeCastRules {
     rules.put(MinorType.TINYINT, rule);
 
     /** SMALLINT cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -108,7 +109,7 @@ public class TypeCastRules {
     rules.put(MinorType.SMALLINT, rule);
 
     /** INT cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -136,7 +137,7 @@ public class TypeCastRules {
     rules.put(MinorType.INT, rule);
 
     /** BIGINT cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -164,7 +165,7 @@ public class TypeCastRules {
     rules.put(MinorType.BIGINT, rule);
 
     /** UINT8 cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -192,7 +193,7 @@ public class TypeCastRules {
     rules.put(MinorType.UINT8, rule);
 
     /** DECIMAL9 cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -220,7 +221,7 @@ public class TypeCastRules {
     rules.put(MinorType.DECIMAL9, rule);
 
     /** DECIMAL18 cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -248,7 +249,7 @@ public class TypeCastRules {
     rules.put(MinorType.DECIMAL18, rule);
 
     /** DECIMAL28Dense cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -276,7 +277,7 @@ public class TypeCastRules {
     rules.put(MinorType.DECIMAL28DENSE, rule);
 
     /** DECIMAL28Sparse cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -304,7 +305,7 @@ public class TypeCastRules {
     rules.put(MinorType.DECIMAL28SPARSE, rule);
 
     /** DECIMAL38Dense cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -333,7 +334,7 @@ public class TypeCastRules {
 
 
     /** DECIMAL38Sparse cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -361,7 +362,7 @@ public class TypeCastRules {
     rules.put(MinorType.DECIMAL38SPARSE, rule);
 
     /** MONEY cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -389,7 +390,7 @@ public class TypeCastRules {
     rules.put(MinorType.MONEY, rule);
 
     /** DATE cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.DATE);
     rule.add(MinorType.TIMESTAMP);
     rule.add(MinorType.TIMESTAMPTZ);
@@ -402,7 +403,7 @@ public class TypeCastRules {
     rules.put(MinorType.DATE, rule);
 
     /** TIME cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TIME);
     rule.add(MinorType.TIMESTAMP);
     rule.add(MinorType.TIMESTAMPTZ);
@@ -415,7 +416,7 @@ public class TypeCastRules {
     rules.put(MinorType.TIME, rule);
 
     /** TIMESTAMP cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.VAR16CHAR);
     rule.add(MinorType.VARCHAR);
     rule.add(MinorType.VARBINARY);
@@ -433,7 +434,7 @@ public class TypeCastRules {
     rules.put(MinorType.TIMESTAMP, rule);
 
     /** TIMESTAMPTZ cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TIMESTAMPTZ);
     rule.add(MinorType.DATE);
     rule.add(MinorType.TIMESTAMP);
@@ -447,7 +448,7 @@ public class TypeCastRules {
     rules.put(MinorType.TIMESTAMPTZ, rule);
 
     /** Interval cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.INTERVAL);
     rule.add(MinorType.INTERVALDAY);
     rule.add(MinorType.INTERVALYEAR);
@@ -463,7 +464,7 @@ public class TypeCastRules {
     rules.put(MinorType.INTERVAL, rule);
 
     /** INTERVAL YEAR cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.INTERVALYEAR);
     rule.add(MinorType.INTERVAL);
     rule.add(MinorType.INTERVALDAY);
@@ -479,7 +480,7 @@ public class TypeCastRules {
     rules.put(MinorType.INTERVALYEAR, rule);
 
     /** INTERVAL DAY cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.INTERVALDAY);
     rule.add(MinorType.INTERVALYEAR);
     rule.add(MinorType.INTERVAL);
@@ -495,7 +496,7 @@ public class TypeCastRules {
     rules.put(MinorType.INTERVALDAY, rule);
 
     /** FLOAT4 cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -520,7 +521,7 @@ public class TypeCastRules {
     rules.put(MinorType.FLOAT4, rule);
 
     /** FLOAT8 cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -546,7 +547,7 @@ public class TypeCastRules {
     rules.put(MinorType.FLOAT8, rule);
 
     /** BIT cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.BIT);
     rule.add(MinorType.FIXEDCHAR);
@@ -557,7 +558,7 @@ public class TypeCastRules {
     rules.put(MinorType.BIT, rule);
 
     /** FIXEDCHAR cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -594,7 +595,7 @@ public class TypeCastRules {
     rules.put(MinorType.FIXEDCHAR, rule);
 
     /** FIXED16CHAR cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -630,7 +631,7 @@ public class TypeCastRules {
     rules.put(MinorType.FIXED16CHAR, rule);
 
     /** FIXEDBINARY cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -657,7 +658,7 @@ public class TypeCastRules {
     rules.put(MinorType.FIXEDBINARY, rule);
 
     /** VARCHAR cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -693,7 +694,7 @@ public class TypeCastRules {
     rules.put(MinorType.VARCHAR, rule);
 
     /** VAR16CHAR cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -728,7 +729,7 @@ public class TypeCastRules {
     rules.put(MinorType.VAR16CHAR, rule);
 
     /** VARBINARY cast able from **/
-    rule = new HashSet<MinorType>();
+    rule = new HashSet<>();
     rule.add(MinorType.TINYINT);
     rule.add(MinorType.SMALLINT);
     rule.add(MinorType.INT);
@@ -769,7 +770,7 @@ public class TypeCastRules {
     return isCastable(from.getMinorType(), to.getMinorType());
   }
 
-  private static boolean isCastable(MinorType from, MinorType to) {
+  public static boolean isCastable(MinorType from, MinorType to) {
     return from.equals(MinorType.NULL) ||      //null could be casted to any other type.
         (rules.get(to) == null ? false : rules.get(to).contains(from));
   }
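
Widening isCastable to public lets code outside the resolver package, such as the
server-metadata provider computing the CONVERT support matrix, probe the cast table
directly. A small illustration:

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.resolver.TypeCastRules;

// INT appears in BIGINT's castable-from rule set above, so this returns true.
boolean ok = TypeCastRules.isCastable(MinorType.INT, MinorType.BIGINT);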

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
index 847b726..a7ea7b7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
@@ -44,6 +44,7 @@ import org.apache.drill.exec.proto.UserProtos.GetCatalogsResp;
 import org.apache.drill.exec.proto.UserProtos.GetColumnsResp;
 import org.apache.drill.exec.proto.UserProtos.GetQueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.GetSchemasResp;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaResp;
 import org.apache.drill.exec.proto.UserProtos.GetTablesResp;
 import org.apache.drill.exec.proto.UserProtos.QueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.RpcEndpointInfos;
@@ -318,6 +319,8 @@ public class UserClient extends BasicClient<RpcType, UserClient.UserToBitConnect
       return CreatePreparedStatementResp.getDefaultInstance();
     case RpcType.SASL_MESSAGE_VALUE:
       return SaslMessage.getDefaultInstance();
+    case RpcType.SERVER_META_VALUE:
+      return GetServerMetaResp.getDefaultInstance();
     }
     throw new RpcException(String.format("Unable to deal with RpcType of %d", rpcType));
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
index ecf15dd..357f633 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
@@ -37,6 +37,8 @@ import org.apache.drill.exec.proto.UserProtos.GetColumnsResp;
 import org.apache.drill.exec.proto.UserProtos.GetQueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.GetSchemasReq;
 import org.apache.drill.exec.proto.UserProtos.GetSchemasResp;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaReq;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaResp;
 import org.apache.drill.exec.proto.UserProtos.GetTablesReq;
 import org.apache.drill.exec.proto.UserProtos.GetTablesResp;
 import org.apache.drill.exec.proto.UserProtos.QueryPlanFragments;
@@ -71,6 +73,7 @@ public class UserRpcConfig {
         .add(RpcType.CREATE_PREPARED_STATEMENT, CreatePreparedStatementReq.class,
             RpcType.PREPARED_STATEMENT, CreatePreparedStatementResp.class) // user to bit
         .add(RpcType.SASL_MESSAGE, SaslMessage.class, RpcType.SASL_MESSAGE, SaslMessage.class) // user <-> bit
+        .add(RpcType.GET_SERVER_META, GetServerMetaReq.class, RpcType.SERVER_META, GetServerMetaResp.class) // user to bit
         .build();
   }
 
@@ -88,7 +91,7 @@ public class UserRpcConfig {
         .<RpcType> builder()
         .add(RpcType.RUN_QUERY, RpcType.CANCEL_QUERY, RpcType.GET_QUERY_PLAN_FRAGMENTS, RpcType.RESUME_PAUSED_QUERY,
           RpcType.GET_CATALOGS, RpcType.GET_SCHEMAS, RpcType.GET_TABLES, RpcType.GET_COLUMNS,
-          RpcType.CREATE_PREPARED_STATEMENT)
+          RpcType.CREATE_PREPARED_STATEMENT, RpcType.GET_SERVER_META)
         .build()
         );
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
index c513d11..43e1c7f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
@@ -18,7 +18,6 @@
 package org.apache.drill.exec.rpc.user;
 
 import java.lang.management.ManagementFactory;
-import java.lang.management.RuntimeMXBean;
 
 import org.apache.drill.common.Version;
 import org.apache.drill.common.util.DrillVersionInfo;
@@ -33,6 +32,23 @@ import com.google.common.base.Preconditions;
 public final class UserRpcUtils {
   private UserRpcUtils() {}
 
+  /*
+   * Template for the endpoint infos.
+   *
+   * It speeds up things not to check application JMX for
+   * each connection.
+   */
+  private static final RpcEndpointInfos INFOS_TEMPLATE =
+      RpcEndpointInfos.newBuilder()
+        .setApplication(ManagementFactory.getRuntimeMXBean().getName())
+        .setVersion(DrillVersionInfo.getVersion())
+        .setMajorVersion(DrillVersionInfo.getMajorVersion())
+        .setMinorVersion(DrillVersionInfo.getMinorVersion())
+        .setPatchVersion(DrillVersionInfo.getPatchVersion())
+        .setBuildNumber(DrillVersionInfo.getBuildNumber())
+        .setVersionQualifier(DrillVersionInfo.getQualifier())
+        .buildPartial();
+
   /**
    * Returns a {@code RpcEndpointInfos} instance
    *
@@ -45,16 +61,8 @@ public final class UserRpcUtils {
    * @throws NullPointerException if name is null
    */
   public static RpcEndpointInfos getRpcEndpointInfos(String name) {
-    RuntimeMXBean mxBean = ManagementFactory.getRuntimeMXBean();
-    RpcEndpointInfos infos = RpcEndpointInfos.newBuilder()
+    RpcEndpointInfos infos = RpcEndpointInfos.newBuilder(INFOS_TEMPLATE)
         .setName(Preconditions.checkNotNull(name))
-        .setApplication(mxBean.getName())
-        .setVersion(DrillVersionInfo.getVersion())
-        .setMajorVersion(DrillVersionInfo.getMajorVersion())
-        .setMinorVersion(DrillVersionInfo.getMinorVersion())
-        .setPatchVersion(DrillVersionInfo.getPatchVersion())
-        .setBuildNumber(DrillVersionInfo.getBuildNumber())
-        .setVersionQualifier(DrillVersionInfo.getQualifier())
         .build();
 
     return infos;
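
For reference, the buildPartial()/newBuilder(prototype) pair used here is a
general protobuf pattern for caching fields that are expensive to compute. A
minimal sketch with a hypothetical Info message (the message name and fields
are invented; the builder calls are standard protobuf generated-code API):

// Hypothetical message: message Info { optional string host = 1;
//                                      optional string name = 2; }
public final class InfoFactory {
  // buildPartial() skips the "all required fields set" check, so the
  // template can be built once with only the static field populated.
  private static final Info TEMPLATE = Info.newBuilder()
      .setHost(java.lang.management.ManagementFactory
          .getRuntimeMXBean().getName())          // queried exactly once
      .buildPartial();

  public static Info create(String name) {
    // newBuilder(prototype) starts from a copy of the template's fields,
    // so each call only pays for the per-call field.
    return Info.newBuilder(TEMPLATE)
        .setName(name)
        .build();
  }
}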

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
index e917b3e..9f0d502 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.net.SocketAddress;
 import java.util.UUID;
 
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
+import javax.security.sasl.SaslException;
+
 import org.apache.drill.common.config.DrillProperties;
 import org.apache.drill.exec.exception.DrillbitStartupException;
 import org.apache.drill.exec.memory.BufferAllocator;
@@ -39,19 +39,21 @@ import org.apache.drill.exec.proto.UserProtos.SaslSupport;
 import org.apache.drill.exec.proto.UserProtos.UserProperties;
 import org.apache.drill.exec.proto.UserProtos.UserToBitHandshake;
 import org.apache.drill.exec.rpc.AbstractRemoteConnection;
+import org.apache.drill.exec.rpc.AbstractServerConnection;
 import org.apache.drill.exec.rpc.BasicServer;
 import org.apache.drill.exec.rpc.OutOfMemoryHandler;
 import org.apache.drill.exec.rpc.OutboundRpcMessage;
 import org.apache.drill.exec.rpc.ProtobufLengthDecoder;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.exec.rpc.RpcOutcomeListener;
-import org.apache.drill.exec.rpc.AbstractServerConnection;
 import org.apache.drill.exec.rpc.security.ServerAuthenticationHandler;
-import org.apache.drill.exec.rpc.user.UserServer.BitToUserConnection;
 import org.apache.drill.exec.rpc.security.plain.PlainFactory;
+import org.apache.drill.exec.rpc.user.UserServer.BitToUserConnection;
 import org.apache.drill.exec.rpc.user.security.UserAuthenticationException;
 import org.apache.drill.exec.server.BootStrapContext;
 import org.apache.drill.exec.work.user.UserWorker;
+import org.apache.hadoop.security.HadoopKerberosName;
+import org.slf4j.Logger;
 
 import com.google.protobuf.MessageLite;
 
@@ -59,10 +61,8 @@ import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.EventLoopGroup;
 import io.netty.channel.socket.SocketChannel;
-import org.apache.hadoop.security.HadoopKerberosName;
-import org.slf4j.Logger;
-
-import javax.security.sasl.SaslException;
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.GenericFutureListener;
 
 public class UserServer extends BasicServer<RpcType, BitToUserConnection> {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UserServer.class);

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServerRequestHandler.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServerRequestHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServerRequestHandler.java
index 1c44176..0be4b2c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServerRequestHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServerRequestHandler.java
@@ -17,9 +17,6 @@
  */
 package org.apache.drill.exec.rpc.user;
 
-import com.google.protobuf.InvalidProtocolBufferException;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufInputStream;
 import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
 import org.apache.drill.exec.proto.UserBitShared.QueryId;
 import org.apache.drill.exec.proto.UserProtos.CreatePreparedStatementReq;
@@ -27,16 +24,22 @@ import org.apache.drill.exec.proto.UserProtos.GetCatalogsReq;
 import org.apache.drill.exec.proto.UserProtos.GetColumnsReq;
 import org.apache.drill.exec.proto.UserProtos.GetQueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.GetSchemasReq;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaReq;
 import org.apache.drill.exec.proto.UserProtos.GetTablesReq;
 import org.apache.drill.exec.proto.UserProtos.RpcType;
 import org.apache.drill.exec.proto.UserProtos.RunQuery;
+import org.apache.drill.exec.rpc.RequestHandler;
 import org.apache.drill.exec.rpc.Response;
 import org.apache.drill.exec.rpc.ResponseSender;
 import org.apache.drill.exec.rpc.RpcException;
-import org.apache.drill.exec.rpc.RequestHandler;
 import org.apache.drill.exec.rpc.user.UserServer.BitToUserConnection;
 import org.apache.drill.exec.work.user.UserWorker;
 
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufInputStream;
+
 /**
  * Should create only one instance of this class per Drillbit service.
  */
@@ -135,6 +138,15 @@ class UserServerRequestHandler implements RequestHandler<BitToUserConnection> {
       } catch (final InvalidProtocolBufferException e) {
         throw new RpcException("Failure while decoding CreatePreparedStatementReq body.", e);
       }
+    case RpcType.GET_SERVER_META_VALUE:
+      try {
+        final GetServerMetaReq req =
+            GetServerMetaReq.PARSER.parseFrom(new ByteBufInputStream(pBody));
+        worker.submitServerMetadataWork(connection.getSession(), req, responseSender);
+        break;
+      } catch (final InvalidProtocolBufferException e) {
+        throw new RpcException("Failure while decoding CreatePreparedStatementReq body.", e);
+      }
     default:
       throw new UnsupportedOperationException(
           String.format("UserServerRequestHandler received rpc of unknown type. Type was %d.", rpcType));
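
On the client side, this dispatch pairs with a one-line call that returns an
RPC future; the test added later in this change exercises the same API. A
usage sketch, assuming an already-connected DrillClient named client:

// Issue GET_SERVER_META and block on the future for the response.
GetServerMetaResp resp = client.getServerMeta().get();
if (resp.getStatus() == RequestStatus.OK) {
  ServerMeta meta = resp.getServerMeta();
  // e.g. meta.getIdentifierQuoteString() returns "`" for Drill
}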

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
index 6ababf4..cf64b20 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
@@ -585,7 +585,7 @@ public class MetadataProvider {
    * @param ex Exception thrown
    * @return
    */
-  private static DrillPBError createPBError(final String failedFunction, final Throwable ex) {
+  static DrillPBError createPBError(final String failedFunction, final Throwable ex) {
     final String errorId = UUID.randomUUID().toString();
     logger.error("Failed to {}. ErrorId: {}", failedFunction, errorId, ex);
 

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/ServerMetaProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/ServerMetaProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/ServerMetaProvider.java
new file mode 100644
index 0000000..41421e3
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/ServerMetaProvider.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.work.metadata;
+
+import java.util.Arrays;
+
+import org.apache.calcite.avatica.util.Casing;
+import org.apache.calcite.sql.SqlJdbcFunctionCall;
+import org.apache.calcite.sql.parser.SqlAbstractParserImpl.Metadata;
+import org.apache.calcite.sql.parser.SqlParser;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.apache.drill.exec.planner.sql.DrillParserConfig;
+import org.apache.drill.exec.proto.UserProtos.ConvertSupport;
+import org.apache.drill.exec.proto.UserProtos.CorrelationNamesSupport;
+import org.apache.drill.exec.proto.UserProtos.DateTimeLiteralsSupport;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaReq;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaResp;
+import org.apache.drill.exec.proto.UserProtos.GroupBySupport;
+import org.apache.drill.exec.proto.UserProtos.IdentifierCasing;
+import org.apache.drill.exec.proto.UserProtos.NullCollation;
+import org.apache.drill.exec.proto.UserProtos.OrderBySupport;
+import org.apache.drill.exec.proto.UserProtos.OuterJoinSupport;
+import org.apache.drill.exec.proto.UserProtos.RequestStatus;
+import org.apache.drill.exec.proto.UserProtos.RpcType;
+import org.apache.drill.exec.proto.UserProtos.ServerMeta;
+import org.apache.drill.exec.proto.UserProtos.SubQuerySupport;
+import org.apache.drill.exec.proto.UserProtos.UnionSupport;
+import org.apache.drill.exec.resolver.TypeCastRules;
+import org.apache.drill.exec.rpc.Response;
+import org.apache.drill.exec.rpc.ResponseSender;
+import org.apache.drill.exec.rpc.user.UserSession;
+import org.apache.drill.exec.server.DrillbitContext;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Contains worker {@link Runnable} for returning server meta information
+ */
+public class ServerMetaProvider {
+  private static final ServerMeta DEFAULT = ServerMeta.newBuilder()
+      .addAllConvertSupport(getSupportedConvertOps())
+      .addAllDateTimeFunctions(Splitter.on(",").split(SqlJdbcFunctionCall.getTimeDateFunctions()))
+      .addAllDateTimeLiteralsSupport(Arrays.asList(DateTimeLiteralsSupport.values()))
+      .addAllNumericFunctions(Splitter.on(",").split(SqlJdbcFunctionCall.getNumericFunctions()))
+      .addAllOrderBySupport(Arrays.asList(OrderBySupport.OB_UNRELATED, OrderBySupport.OB_EXPRESSION))
+      .addAllOuterJoinSupport(Arrays.asList(OuterJoinSupport.OJ_LEFT, OuterJoinSupport.OJ_RIGHT, OuterJoinSupport.OJ_FULL))
+      .addAllStringFunctions(Splitter.on(",").split(SqlJdbcFunctionCall.getStringFunctions()))
+      .addAllSystemFunctions(Splitter.on(",").split(SqlJdbcFunctionCall.getSystemFunctions()))
+      .addAllSubquerySupport(Arrays.asList(SubQuerySupport.SQ_CORRELATED, SubQuerySupport.SQ_IN_COMPARISON, SubQuerySupport.SQ_IN_EXISTS, SubQuerySupport.SQ_IN_QUANTIFIED))
+      .addAllUnionSupport(Arrays.asList(UnionSupport.U_UNION, UnionSupport.U_UNION_ALL))
+      .setAllTablesSelectable(false)
+      .setBlobIncludedInMaxRowSize(true)
+      .setCatalogAtStart(true)
+      .setCatalogSeparator(".")
+      .setCatalogTerm("catalog")
+      .setColumnAliasingSupported(true)
+      .setNullPlusNonNullEqualsNull(true)
+      .setCorrelationNamesSupport(CorrelationNamesSupport.CN_ANY)
+      .setReadOnly(false)
+      .setGroupBySupport(GroupBySupport.GB_UNRELATED)
+      .setLikeEscapeClauseSupported(true)
+      .setNullCollation(NullCollation.NC_AT_END)
+      .setSchemaTerm("schema")
+      .setSearchEscapeString("\\")
+      .setTableTerm("table")
+      .build();
+
+
+  private static final Iterable<ConvertSupport> getSupportedConvertOps() {
+    // A set would be more appropriate, but this nested loop cannot
+    // produce duplicates, and an iterable is all the caller needs.
+    ImmutableList.Builder<ConvertSupport> supportedConvertedOps = ImmutableList.builder();
+
+    for(MinorType from: MinorType.values()) {
+      for(MinorType to: MinorType.values()) {
+        if (TypeCastRules.isCastable(from, to)) {
+          supportedConvertedOps.add(ConvertSupport.newBuilder().setFrom(from).setTo(to).build());
+        }
+      }
+    }
+
+    return supportedConvertedOps.build();
+  }
+  /**
+   * Runnable that creates server meta information for given {@code ServerMetaReq} and
+   * sends the response at the end.
+   */
+  public static class ServerMetaWorker implements Runnable {
+    private final UserSession session;
+    private final DrillbitContext context;
+    @SuppressWarnings("unused")
+    private final GetServerMetaReq req;
+    private final ResponseSender responseSender;
+
+    public ServerMetaWorker(final UserSession session, final DrillbitContext context,
+        final GetServerMetaReq req, final ResponseSender responseSender) {
+      this.session = session;
+      this.context = context;
+      this.req = req;
+      this.responseSender = responseSender;
+    }
+
+    @Override
+    public void run() {
+      final GetServerMetaResp.Builder respBuilder = GetServerMetaResp.newBuilder();
+      try {
+        final ServerMeta.Builder metaBuilder = ServerMeta.newBuilder(DEFAULT);
+        PlannerSettings plannerSettings = new PlannerSettings(session.getOptions(), context.getFunctionImplementationRegistry());
+
+        DrillParserConfig config = new DrillParserConfig(plannerSettings);
+
+        int identifierMaxLength = config.identifierMaxLength();
+        Metadata metadata = SqlParser.create("", config).getMetadata();
+        metaBuilder
+          .setMaxCatalogNameLength(identifierMaxLength)
+          .setMaxColumnNameLength(identifierMaxLength)
+          .setMaxCursorNameLength(identifierMaxLength)
+          .setMaxSchemaNameLength(identifierMaxLength)
+          .setMaxTableNameLength(identifierMaxLength)
+          .setMaxUserNameLength(identifierMaxLength)
+          .setIdentifierQuoteString(config.quoting().string)
+          .setIdentifierCasing(getIdentifierCasing(config.unquotedCasing(), config.caseSensitive()))
+          .setQuotedIdentifierCasing(getIdentifierCasing(config.quotedCasing(), config.caseSensitive()))
+          .addAllSqlKeywords(Splitter.on(",").split(metadata.getJdbcKeywords()));
+        respBuilder.setServerMeta(metaBuilder);
+        respBuilder.setStatus(RequestStatus.OK);
+      } catch(Throwable t) {
+        respBuilder.setStatus(RequestStatus.FAILED);
+        respBuilder.setError(MetadataProvider.createPBError("server meta", t));
+      } finally {
+        responseSender.send(new Response(RpcType.SERVER_META, respBuilder.build()));
+      }
+    }
+
+    public static IdentifierCasing getIdentifierCasing(Casing casing, boolean caseSensitive) {
+      switch(casing) {
+      case TO_LOWER:
+        return IdentifierCasing.IC_STORES_LOWER;
+
+      case TO_UPPER:
+        return IdentifierCasing.IC_STORES_UPPER;
+
+      case UNCHANGED:
+        return caseSensitive ? IdentifierCasing.IC_SUPPORTS_MIXED : IdentifierCasing.IC_STORES_MIXED;
+
+      default:
+        throw new AssertionError("Unknown casing:" + casing);
+      }
+    }
+  }
+}
\ No newline at end of file
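
The casing mapping above encodes JDBC's distinction between storing and
supporting mixed case: an engine that leaves identifiers unchanged and
compares them case-sensitively truly supports mixed case, while one that
compares them case-insensitively merely stores mixed case. A small sketch
exercising the public helper:

// Identifiers folded to upper case always report IC_STORES_UPPER.
assert ServerMetaWorker.getIdentifierCasing(Casing.TO_UPPER, false)
    == IdentifierCasing.IC_STORES_UPPER;

// For unchanged identifiers, case sensitivity picks between the two
// "mixed" flavors.
assert ServerMetaWorker.getIdentifierCasing(Casing.UNCHANGED, true)
    == IdentifierCasing.IC_SUPPORTS_MIXED;
assert ServerMetaWorker.getIdentifierCasing(Casing.UNCHANGED, false)
    == IdentifierCasing.IC_STORES_MIXED;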

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
index c1fa7a0..b90b4d2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
@@ -26,6 +26,7 @@ import org.apache.drill.exec.proto.UserProtos.GetCatalogsReq;
 import org.apache.drill.exec.proto.UserProtos.GetColumnsReq;
 import org.apache.drill.exec.proto.UserProtos.GetQueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.GetSchemasReq;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaReq;
 import org.apache.drill.exec.proto.UserProtos.GetTablesReq;
 import org.apache.drill.exec.proto.UserProtos.QueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.RunQuery;
@@ -38,6 +39,7 @@ import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.work.WorkManager.WorkerBee;
 import org.apache.drill.exec.work.foreman.Foreman;
 import org.apache.drill.exec.work.metadata.MetadataProvider;
+import org.apache.drill.exec.work.metadata.ServerMetaProvider.ServerMetaWorker;
 import org.apache.drill.exec.work.prepare.PreparedStatementProvider.PreparedStatementWorker;
 
 public class UserWorker{
@@ -125,4 +127,9 @@ public class UserWorker{
       final ResponseSender sender) {
     bee.addNewWork(new PreparedStatementWorker(connection, this, sender, req));
   }
+
+  public void submitServerMetadataWork(final UserSession session, final GetServerMetaReq req,
+      final ResponseSender sender) {
+    bee.addNewWork(new ServerMetaWorker(session, bee.getContext(), req, sender));
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestServerMetaProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestServerMetaProvider.java b/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestServerMetaProvider.java
new file mode 100644
index 0000000..c1fd38b
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/work/metadata/TestServerMetaProvider.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.work.metadata;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import org.apache.calcite.avatica.util.Quoting;
+import org.apache.drill.BaseTestQuery;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaResp;
+import org.apache.drill.exec.proto.UserProtos.RequestStatus;
+import org.apache.drill.exec.proto.UserProtos.ServerMeta;
+import org.junit.Test;
+
+/**
+ * Tests for server metadata provider APIs.
+ */
+public class TestServerMetaProvider extends BaseTestQuery {
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestServerMetaProvider.class);
+
+  @Test
+  public void testServerMeta() throws Exception {
+    GetServerMetaResp resp = client.getServerMeta().get();
+    assertNotNull(resp);
+    assertEquals(RequestStatus.OK, resp.getStatus());
+    assertNotNull(resp.getServerMeta());
+
+    ServerMeta serverMeta = resp.getServerMeta();
+    logger.trace("Server metadata: {}", serverMeta);
+
+    assertEquals(Quoting.BACK_TICK.string, serverMeta.getIdentifierQuoteString());
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
index 15f676c..c225895 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
@@ -66,12 +66,11 @@ public class DrillConnectionConfig extends ConnectionConfigImpl {
     return TimeZone.getDefault();
   }
 
-  public boolean disableServerPreparedStatement() {
+  public boolean isServerPreparedStatementDisabled() {
     return Boolean.valueOf(props.getProperty("server.preparedstatement.disabled"));
   }
 
-  public boolean disableServerMetadata() {
+  public boolean isServerMetadataDisabled() {
     return Boolean.valueOf(props.getProperty("server.metadata.disabled"));
   }
-
 }
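
Both getters read plain boolean connection properties, so either fallback can
be forced from a Properties object or the JDBC URL. A usage sketch; the
ZooKeeper address is a placeholder:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

Properties props = new Properties();
// Force client-side metadata and prepared statements, e.g. for
// debugging or when talking to an older server.
props.setProperty("server.metadata.disabled", "true");
props.setProperty("server.preparedstatement.disabled", "true");

Connection conn =
    DriverManager.getConnection("jdbc:drill:zk=host1:2181/drill/drillbits1", props);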

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
index 3d19f82..eeb7847 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
@@ -23,15 +23,35 @@ import java.sql.ResultSet;
 import java.sql.RowIdLifetime;
 import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
 
-import org.apache.calcite.avatica.AvaticaConnection;
 import org.apache.calcite.avatica.AvaticaDatabaseMetaData;
 import org.apache.drill.common.Version;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.client.ServerMethod;
+import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
+import org.apache.drill.exec.proto.UserProtos.ConvertSupport;
+import org.apache.drill.exec.proto.UserProtos.CorrelationNamesSupport;
+import org.apache.drill.exec.proto.UserProtos.GetServerMetaResp;
+import org.apache.drill.exec.proto.UserProtos.GroupBySupport;
+import org.apache.drill.exec.proto.UserProtos.IdentifierCasing;
+import org.apache.drill.exec.proto.UserProtos.NullCollation;
+import org.apache.drill.exec.proto.UserProtos.OrderBySupport;
+import org.apache.drill.exec.proto.UserProtos.OuterJoinSupport;
+import org.apache.drill.exec.proto.UserProtos.RequestStatus;
+import org.apache.drill.exec.proto.UserProtos.ServerMeta;
+import org.apache.drill.exec.proto.UserProtos.SubQuerySupport;
+import org.apache.drill.exec.proto.UserProtos.UnionSupport;
 import org.apache.drill.jdbc.AlreadyClosedSqlException;
 import org.apache.drill.jdbc.DrillDatabaseMetaData;
 
+import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableSet;
 
 
 /**
@@ -40,8 +60,64 @@ import com.google.common.base.Throwables;
 class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
                                 implements DrillDatabaseMetaData {
 
-  protected DrillDatabaseMetaDataImpl( AvaticaConnection connection ) {
+
+  /**
+   * Holds allowed conversion between SQL types
+   *
+   */
+  private static final class SQLConvertSupport {
+    public final int from;
+    public final int to;
+
+    public SQLConvertSupport(int from, int to) {
+      this.from = from;
+      this.to = to;
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(from, to);
+    }
+
+    @Override public boolean equals(Object obj) {
+      if (this == obj) {
+        return true;
+      }
+
+      if (!(obj instanceof SQLConvertSupport)) {
+        return false;
+      }
+
+      SQLConvertSupport other = (SQLConvertSupport) obj;
+      return from == other.from && to == other.to;
+    }
+
+    public static final Set<SQLConvertSupport> toSQLConvertSupport(Iterable<ConvertSupport> convertSupportIterable) {
+      ImmutableSet.Builder<SQLConvertSupport> sqlConvertSupportSet = ImmutableSet.builder();
+      for(ConvertSupport convertSupport: convertSupportIterable) {
+        try {
+          sqlConvertSupportSet.add(new SQLConvertSupport(
+              toSQLType(convertSupport.getFrom()),
+              toSQLType(convertSupport.getTo())));
+        } catch(IllegalArgumentException e) {
+          // Ignore unknown types...
+        }
+      }
+      return sqlConvertSupportSet.build();
+    }
+
+    private static int toSQLType(MinorType minorType) {
+      String sqlTypeName = Types.getSqlTypeName(Types.optional(minorType));
+      return Types.getJdbcTypeCode(sqlTypeName);
+    }
+  }
+
+  private volatile ServerMeta serverMeta;
+  private volatile Set<SQLConvertSupport> convertSupport;
+
+  protected DrillDatabaseMetaDataImpl( DrillConnectionImpl connection ) {
     super( connection );
+
   }
 
   /**
@@ -58,6 +134,13 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
     }
   }
 
+  private boolean getServerMetaSupported() throws SQLException {
+    DrillConnectionImpl connection = (DrillConnectionImpl) getConnection();
+    return
+        !connection.getConfig().isServerMetadataDisabled()
+        && connection.getClient().getSupportedMethods().contains(ServerMethod.SERVER_META);
+  }
+
   private String getServerName() throws SQLException {
     DrillConnectionImpl connection = (DrillConnectionImpl) getConnection();
     return connection.getClient().getServerName();
@@ -68,6 +151,39 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
     return connection.getClient().getServerVersion();
   }
 
+  private ServerMeta getServerMeta() throws SQLException {
+    assert getServerMetaSupported();
+
+    if (serverMeta == null) {
+      synchronized(this) {
+        if (serverMeta == null) {
+          DrillConnectionImpl connection = (DrillConnectionImpl) getConnection();
+
+          try {
+            GetServerMetaResp resp = connection.getClient().getServerMeta().get();
+            if (resp.getStatus() != RequestStatus.OK) {
+              DrillPBError drillError = resp.getError();
+              throw new SQLException("Error when getting server meta: " + drillError.getMessage());
+            }
+            serverMeta = resp.getServerMeta();
+            convertSupport = SQLConvertSupport.toSQLConvertSupport(serverMeta.getConvertSupportList());
+          } catch (InterruptedException e) {
+            throw new SQLException("Interrupted when getting server meta", e);
+          } catch (ExecutionException e) {
+            Throwable cause = e.getCause();
+            if (cause == null) {
+              throw new AssertionError("Unexpected ExecutionException without a cause", e);
+            }
+            Throwables.propagateIfPossible(cause);
+            throw new SQLException("Error when getting server meta", cause);
+          }
+        }
+      }
+    }
+
+    return serverMeta;
+  }
+
   // Note:  Dynamic proxies could be used to reduce the quantity (450?) of
   // method overrides by eliminating those that exist solely to check whether
   // the object is closed.  (Check performance before applying to frequently
@@ -87,7 +203,10 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean allTablesAreSelectable() throws SQLException {
     throwIfClosed();
-    return super.allTablesAreSelectable();
+    if (!getServerMetaSupported()) {
+      return super.allTablesAreSelectable();
+    }
+    return getServerMeta().getAllTablesSelectable();
   }
 
   @Override
@@ -105,7 +224,10 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean isReadOnly() throws SQLException {
     throwIfClosed();
-    return super.isReadOnly();
+    if (!getServerMetaSupported()) {
+      return super.isReadOnly();
+    }
+    return getServerMeta().getReadOnly();
   }
 
 
@@ -114,25 +236,37 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean nullsAreSortedHigh() throws SQLException {
     throwIfClosed();
-    return true;
+    if (!getServerMetaSupported()) {
+      return true;
+    }
+    return getServerMeta().getNullCollation() == NullCollation.NC_HIGH;
   }
 
   @Override
   public boolean nullsAreSortedLow() throws SQLException {
     throwIfClosed();
-    return false;
+    if (!getServerMetaSupported()) {
+      return false;
+    }
+    return getServerMeta().getNullCollation() == NullCollation.NC_LOW;
   }
 
   @Override
   public boolean nullsAreSortedAtStart() throws SQLException {
     throwIfClosed();
-    return false;
+    if (!getServerMetaSupported()) {
+      return false;
+    }
+    return getServerMeta().getNullCollation() == NullCollation.NC_AT_START;
   }
 
   @Override
   public boolean nullsAreSortedAtEnd() throws SQLException {
     throwIfClosed();
-    return false;
+    if (!getServerMetaSupported()) {
+      return false;
+    }
+    return getServerMeta().getNullCollation() == NullCollation.NC_AT_END;
   }
 
   @Override
@@ -194,98 +328,146 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean supportsMixedCaseIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.supportsMixedCaseIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.supportsMixedCaseIdentifiers();
+    }
+    return getServerMeta().getIdentifierCasing() == IdentifierCasing.IC_SUPPORTS_MIXED;
   }
 
   @Override
   public boolean storesUpperCaseIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.storesUpperCaseIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.storesUpperCaseIdentifiers();
+    }
+    return getServerMeta().getIdentifierCasing() == IdentifierCasing.IC_STORES_UPPER;
   }
 
   @Override
   public boolean storesLowerCaseIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.storesLowerCaseIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.storesLowerCaseIdentifiers();
+    }
+    return getServerMeta().getIdentifierCasing() == IdentifierCasing.IC_STORES_LOWER;
   }
 
   @Override
   public boolean storesMixedCaseIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.storesMixedCaseIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.storesMixedCaseIdentifiers();
+    }
+    return getServerMeta().getIdentifierCasing() == IdentifierCasing.IC_STORES_MIXED;
   }
 
   @Override
   public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.supportsMixedCaseQuotedIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.supportsMixedCaseQuotedIdentifiers();
+    }
+    return getServerMeta().getQuotedIdentifierCasing() == IdentifierCasing.IC_SUPPORTS_MIXED;
   }
 
   @Override
   public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.storesUpperCaseQuotedIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.storesUpperCaseQuotedIdentifiers();
+    }
+    return getServerMeta().getQuotedIdentifierCasing() == IdentifierCasing.IC_STORES_UPPER;
   }
 
   @Override
   public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.storesLowerCaseQuotedIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.storesLowerCaseQuotedIdentifiers();
+    }
+    return getServerMeta().getQuotedIdentifierCasing() == IdentifierCasing.IC_STORES_LOWER;
   }
 
   @Override
   public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
     throwIfClosed();
-    return super.storesMixedCaseQuotedIdentifiers();
+    if (!getServerMetaSupported()) {
+      return super.storesMixedCaseQuotedIdentifiers();
+    }
+    return getServerMeta().getQuotedIdentifierCasing() == IdentifierCasing.IC_STORES_MIXED;
   }
 
   // TODO(DRILL-3510):  Update when Drill accepts standard SQL's double quote.
   @Override
   public String getIdentifierQuoteString() throws SQLException {
     throwIfClosed();
-    return "`";
+    if (!getServerMetaSupported()) {
+      return "`";
+    }
+    return getServerMeta().getIdentifierQuoteString();
   }
 
   @Override
   public String getSQLKeywords() throws SQLException {
     throwIfClosed();
-    return super.getSQLKeywords();
+    if (!getServerMetaSupported()) {
+      return super.getSQLKeywords();
+    }
+    return Joiner.on(",").join(getServerMeta().getSqlKeywordsList());
   }
 
   @Override
   public String getNumericFunctions() throws SQLException {
     throwIfClosed();
-    return super.getNumericFunctions();
+    if (!getServerMetaSupported()) {
+      return super.getNumericFunctions();
+    }
+    return Joiner.on(",").join(getServerMeta().getNumericFunctionsList());
   }
 
   @Override
   public String getStringFunctions() throws SQLException {
     throwIfClosed();
-    return super.getStringFunctions();
+    if (!getServerMetaSupported()) {
+      return super.getStringFunctions();
+    }
+    return Joiner.on(",").join(getServerMeta().getStringFunctionsList());
   }
 
   @Override
   public String getSystemFunctions() throws SQLException {
     throwIfClosed();
-    return super.getSystemFunctions();
+    if (!getServerMetaSupported()) {
+      return super.getSystemFunctions();
+    }
+    return Joiner.on(",").join(getServerMeta().getSystemFunctionsList());
   }
 
   @Override
   public String getTimeDateFunctions() throws SQLException {
     throwIfClosed();
-    return super.getTimeDateFunctions();
+    if (!getServerMetaSupported()) {
+      return super.getTimeDateFunctions();
+    }
+    return Joiner.on(",").join(getServerMeta().getDateTimeFunctionsList());
   }
 
   @Override
   public String getSearchStringEscape() throws SQLException {
     throwIfClosed();
-    return super.getSearchStringEscape();
+    if (!getServerMetaSupported()) {
+      return super.getSearchStringEscape();
+    }
+    return getServerMeta().getSearchEscapeString();
   }
 
   @Override
   public String getExtraNameCharacters() throws SQLException {
     throwIfClosed();
-    return super.getExtraNameCharacters();
+    if (!getServerMetaSupported()) {
+      return super.getExtraNameCharacters();
+    }
+    return getServerMeta().getSpecialCharacters();
   }
 
   @Override
@@ -303,73 +485,114 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean supportsColumnAliasing() throws SQLException {
     throwIfClosed();
-    return super.supportsColumnAliasing();
+    if (!getServerMetaSupported()) {
+      return super.supportsColumnAliasing();
+    }
+    return getServerMeta().getColumnAliasingSupported();
   }
 
   @Override
   public boolean nullPlusNonNullIsNull() throws SQLException {
     throwIfClosed();
-    return super.nullPlusNonNullIsNull();
+    if (!getServerMetaSupported()) {
+      return super.nullPlusNonNullIsNull();
+    }
+    return getServerMeta().getNullPlusNonNullEqualsNull();
   }
 
   @Override
   public boolean supportsConvert() throws SQLException {
     throwIfClosed();
-    return super.supportsConvert();
+    if (!getServerMetaSupported()) {
+      return super.supportsConvert();
+    }
+    // Make sure the convert table is loaded
+    getServerMeta();
+    return !convertSupport.isEmpty();
   }
 
   @Override
   public boolean supportsConvert(int fromType, int toType) throws SQLException {
     throwIfClosed();
-    return super.supportsConvert(fromType, toType);
+    if (!getServerMetaSupported()) {
+      return super.supportsConvert(fromType, toType);
+    }
+    // Make sure the convert table is loaded
+    getServerMeta();
+    return convertSupport.contains(new SQLConvertSupport(fromType, toType));
   }
 
   @Override
   public boolean supportsTableCorrelationNames() throws SQLException {
     throwIfClosed();
-    return super.supportsTableCorrelationNames();
+    if (!getServerMetaSupported()) {
+      return super.supportsTableCorrelationNames();
+    }
+    return getServerMeta().getCorrelationNamesSupport() == CorrelationNamesSupport.CN_ANY
+        || getServerMeta().getCorrelationNamesSupport() == CorrelationNamesSupport.CN_DIFFERENT_NAMES;
   }
 
   @Override
   public boolean supportsDifferentTableCorrelationNames() throws SQLException {
     throwIfClosed();
-    return super.supportsDifferentTableCorrelationNames();
+    if (!getServerMetaSupported()) {
+      return super.supportsDifferentTableCorrelationNames();
+    }
+    return getServerMeta().getCorrelationNamesSupport() == CorrelationNamesSupport.CN_DIFFERENT_NAMES;
   }
 
   @Override
   public boolean supportsExpressionsInOrderBy() throws SQLException {
     throwIfClosed();
-    return super.supportsExpressionsInOrderBy();
+    if (!getServerMetaSupported()) {
+      return super.supportsExpressionsInOrderBy();
+    }
+    return getServerMeta().getOrderBySupportList().contains(OrderBySupport.OB_EXPRESSION);
   }
 
   @Override
   public boolean supportsOrderByUnrelated() throws SQLException {
     throwIfClosed();
-    return super.supportsOrderByUnrelated();
+    if (!getServerMetaSupported()) {
+      return super.supportsOrderByUnrelated();
+    }
+    return getServerMeta().getOrderBySupportList().contains(OrderBySupport.OB_UNRELATED);
   }
 
   @Override
   public boolean supportsGroupBy() throws SQLException {
     throwIfClosed();
-    return super.supportsGroupBy();
+    if (!getServerMetaSupported()) {
+      return super.supportsGroupBy();
+    }
+    return getServerMeta().getGroupBySupport() != GroupBySupport.GB_NONE;
   }
 
   @Override
   public boolean supportsGroupByUnrelated() throws SQLException {
     throwIfClosed();
-    return super.supportsGroupByUnrelated();
+    if (!getServerMetaSupported()) {
+      return super.supportsGroupByUnrelated();
+    }
+    return getServerMeta().getGroupBySupport() == GroupBySupport.GB_UNRELATED;
   }
 
   @Override
   public boolean supportsGroupByBeyondSelect() throws SQLException {
     throwIfClosed();
-    return super.supportsGroupByBeyondSelect();
+    if (!getServerMetaSupported()) {
+      return super.supportsGroupByBeyondSelect();
+    }
+    return getServerMeta().getGroupBySupport() == GroupBySupport.GB_BEYOND_SELECT;
   }
 
   @Override
   public boolean supportsLikeEscapeClause() throws SQLException {
     throwIfClosed();
-    return super.supportsLikeEscapeClause();
+    if (!getServerMetaSupported()) {
+      return super.supportsLikeEscapeClause();
+    }
+    return getServerMeta().getLikeEscapeClauseSupported();
   }
 
   @Override
@@ -435,25 +658,38 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean supportsOuterJoins() throws SQLException {
     throwIfClosed();
-    return super.supportsOuterJoins();
+    if (!getServerMetaSupported()) {
+      return super.supportsOuterJoins();
+    }
+    return getServerMeta().getOuterJoinSupportCount() > 0;
   }
 
   @Override
   public boolean supportsFullOuterJoins() throws SQLException {
     throwIfClosed();
-    return super.supportsFullOuterJoins();
+    if (!getServerMetaSupported()) {
+      return super.supportsFullOuterJoins();
+    }
+    return getServerMeta().getOuterJoinSupportList().contains(OuterJoinSupport.OJ_FULL);
   }
 
   @Override
   public boolean supportsLimitedOuterJoins() throws SQLException {
     throwIfClosed();
-    return super.supportsLimitedOuterJoins();
+    if (!getServerMetaSupported()) {
+      return super.supportsLimitedOuterJoins();
+    }
+    return getServerMeta().getOuterJoinSupportCount() > 0
+        && !(getServerMeta().getOuterJoinSupportList().contains(OuterJoinSupport.OJ_FULL));
   }
 
   @Override
   public String getSchemaTerm() throws SQLException {
     throwIfClosed();
-    return super.getSchemaTerm();
+    if (!getServerMetaSupported()) {
+      return super.getSchemaTerm();
+    }
+    return getServerMeta().getSchemaTerm();
   }
 
   @Override
@@ -465,19 +701,28 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public String getCatalogTerm() throws SQLException {
     throwIfClosed();
-    return super.getCatalogTerm();
+    if (!getServerMetaSupported()) {
+      return super.getCatalogTerm();
+    }
+    return getServerMeta().getCatalogTerm();
   }
 
   @Override
   public boolean isCatalogAtStart() throws SQLException {
     throwIfClosed();
-    return super.isCatalogAtStart();
+    if (!getServerMetaSupported()) {
+      return super.isCatalogAtStart();
+    }
+    return getServerMeta().getCatalogAtStart();
   }
 
   @Override
   public String getCatalogSeparator() throws SQLException {
     throwIfClosed();
-    return super.getCatalogSeparator();
+    if (!getServerMetaSupported()) {
+      return super.getCatalogSeparator();
+    }
+    return getServerMeta().getCatalogSeparator();
   }
 
   @Override
@@ -555,7 +800,10 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean supportsSelectForUpdate() throws SQLException {
     throwIfClosed();
-    return super.supportsSelectForUpdate();
+    if (!getServerMetaSupported()) {
+      return super.supportsSelectForUpdate();
+    }
+    return getServerMeta().getSelectForUpdateSupported();
   }
 
   @Override
@@ -567,43 +815,64 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean supportsSubqueriesInComparisons() throws SQLException {
     throwIfClosed();
-    return super.supportsSubqueriesInComparisons();
+    if (!getServerMetaSupported()) {
+      return super.supportsSubqueriesInComparisons();
+    }
+    return getServerMeta().getSubquerySupportList().contains(SubQuerySupport.SQ_IN_COMPARISON);
   }
 
   @Override
   public boolean supportsSubqueriesInExists() throws SQLException {
     throwIfClosed();
-    return super.supportsSubqueriesInExists();
+    if (!getServerMetaSupported()) {
+      return super.supportsSubqueriesInExists();
+    }
+    return getServerMeta().getSubquerySupportList().contains(SubQuerySupport.SQ_IN_EXISTS);
   }
 
   @Override
   public boolean supportsSubqueriesInIns() throws SQLException {
     throwIfClosed();
-    return super.supportsSubqueriesInIns();
+    if (!getServerMetaSupported()) {
+      return super.supportsSubqueriesInIns();
+    }
+    return getServerMeta().getSubquerySupportList().contains(SubQuerySupport.SQ_IN_INSERT);
   }
 
   @Override
   public boolean supportsSubqueriesInQuantifieds() throws SQLException {
     throwIfClosed();
-    return super.supportsSubqueriesInQuantifieds();
+    if (!getServerMetaSupported()) {
+      return super.supportsSubqueriesInQuantifieds();
+    }
+    return getServerMeta().getSubquerySupportList().contains(SubQuerySupport.SQ_IN_QUANTIFIED);
   }
 
   @Override
   public boolean supportsCorrelatedSubqueries() throws SQLException {
     throwIfClosed();
-    return super.supportsCorrelatedSubqueries();
+    if (!getServerMetaSupported()) {
+      return super.supportsCorrelatedSubqueries();
+    }
+    return getServerMeta().getSubquerySupportList().contains(SubQuerySupport.SQ_CORRELATED);
   }
 
   @Override
   public boolean supportsUnion() throws SQLException {
     throwIfClosed();
-    return super.supportsUnion();
+    if (!getServerMetaSupported()) {
+      return super.supportsUnion();
+    }
+    return getServerMeta().getUnionSupportList().contains(UnionSupport.U_UNION);
   }
 
   @Override
   public boolean supportsUnionAll() throws SQLException {
     throwIfClosed();
-    return super.supportsUnionAll();
+    if (!getServerMetaSupported()) {
+      return super.supportsUnionAll();
+    }
+    return getServerMeta().getUnionSupportList().contains(UnionSupport.U_UNION_ALL);
   }
 
   @Override
@@ -633,25 +902,37 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public int getMaxBinaryLiteralLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxBinaryLiteralLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxBinaryLiteralLength();
+    }
+    return getServerMeta().getMaxBinaryLiteralLength();
   }
 
   @Override
   public int getMaxCharLiteralLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxCharLiteralLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxCharLiteralLength();
+    }
+    return getServerMeta().getMaxCharLiteralLength();
   }
 
   @Override
   public int getMaxColumnNameLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxColumnNameLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxColumnNameLength();
+    }
+    return getServerMeta().getMaxColumnNameLength();
   }
 
   @Override
   public int getMaxColumnsInGroupBy() throws SQLException {
     throwIfClosed();
-    return super.getMaxColumnsInGroupBy();
+    if (!getServerMetaSupported()) {
+      return super.getMaxColumnsInGroupBy();
+    }
+    return getServerMeta().getMaxColumnsInGroupBy();
   }
 
   @Override
@@ -663,13 +944,19 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public int getMaxColumnsInOrderBy() throws SQLException {
     throwIfClosed();
-    return super.getMaxColumnsInOrderBy();
+    if (!getServerMetaSupported()) {
+      return super.getMaxColumnsInOrderBy();
+    }
+    return getServerMeta().getMaxColumnsInOrderBy();
   }
 
   @Override
   public int getMaxColumnsInSelect() throws SQLException {
     throwIfClosed();
-    return super.getMaxColumnsInSelect();
+    if (!getServerMetaSupported()) {
+      return super.getMaxColumnsInSelect();
+    }
+    return getServerMeta().getMaxColumnsInSelect();
   }
 
   @Override
@@ -687,7 +974,10 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public int getMaxCursorNameLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxCursorNameLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxCursorNameLength();
+    }
+    return getServerMeta().getMaxCursorNameLength();
   }
 
   @Override
@@ -699,7 +989,10 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public int getMaxSchemaNameLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxSchemaNameLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxSchemaNameLength();
+    }
+    return getServerMeta().getMaxSchemaNameLength();
   }
 
   @Override
@@ -711,49 +1004,73 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public int getMaxCatalogNameLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxCatalogNameLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxCatalogNameLength();
+    }
+    return getServerMeta().getMaxCatalogNameLength();
   }
 
   @Override
   public int getMaxRowSize() throws SQLException {
     throwIfClosed();
-    return super.getMaxRowSize();
+    if (!getServerMetaSupported()) {
+      return super.getMaxRowSize();
+    }
+    return getServerMeta().getMaxRowSize();
   }
 
   @Override
   public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
     throwIfClosed();
-    return super.doesMaxRowSizeIncludeBlobs();
+    if (!getServerMetaSupported()) {
+      return super.doesMaxRowSizeIncludeBlobs();
+    }
+    return getServerMeta().getBlobIncludedInMaxRowSize();
   }
 
   @Override
   public int getMaxStatementLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxStatementLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxStatementLength();
+    }
+    return getServerMeta().getMaxStatementLength();
   }
 
   @Override
   public int getMaxStatements() throws SQLException {
     throwIfClosed();
-    return super.getMaxStatements();
+    if (!getServerMetaSupported()) {
+      return super.getMaxStatements();
+    }
+    return getServerMeta().getMaxStatements();
   }
 
   @Override
   public int getMaxTableNameLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxTableNameLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxTableNameLength();
+    }
+    return getServerMeta().getMaxTableNameLength();
   }
 
   @Override
   public int getMaxTablesInSelect() throws SQLException {
     throwIfClosed();
-    return super.getMaxTablesInSelect();
+    if (!getServerMetaSupported()) {
+      return super.getMaxTablesInSelect();
+    }
+    return getServerMeta().getMaxTablesInSelect();
   }
 
   @Override
   public int getMaxUserNameLength() throws SQLException {
     throwIfClosed();
-    return super.getMaxUserNameLength();
+    if (!getServerMetaSupported()) {
+      return super.getMaxUserNameLength();
+    }
+    return getServerMeta().getMaxUserNameLength();
   }
 
   @Override
@@ -765,7 +1082,10 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public boolean supportsTransactions() throws SQLException {
     throwIfClosed();
-    return super.supportsTransactions();
+    if (!getServerMetaSupported()) {
+      return super.supportsTransactions();
+    }
+    return getServerMeta().getTransactionSupported();
   }
 
   @Override

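
The lazy getServerMeta() above is the double-checked locking idiom: the
serverMeta field is volatile, so the re-check inside the synchronized block
is safe, and the RPC round trip runs at most once per DatabaseMetaData
instance. The skeleton of the pattern, with the RPC replaced by a
java.util.function.Supplier:

class LazyHolder<T> {
  private volatile T value;         // volatile is what makes the idiom safe
  private final java.util.function.Supplier<T> fetch;  // stands in for the RPC

  LazyHolder(java.util.function.Supplier<T> fetch) { this.fetch = fetch; }

  T get() {
    T v = value;                    // one volatile read on the fast path
    if (v == null) {
      synchronized (this) {
        v = value;                  // re-check under the lock
        if (v == null) {
          value = v = fetch.get();  // the expensive call happens once
        }
      }
    }
    return v;
  }
}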
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
index 28a4372..629e47b 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
@@ -74,7 +74,7 @@ public class DrillJdbc41Factory extends DrillFactory {
 
   @Override
   public DrillDatabaseMetaDataImpl newDatabaseMetaData(AvaticaConnection connection) {
-    return new DrillDatabaseMetaDataImpl(connection);
+    return new DrillDatabaseMetaDataImpl((DrillConnectionImpl) connection);
   }
 
 
@@ -101,7 +101,7 @@ public class DrillJdbc41Factory extends DrillFactory {
       throws SQLException {
     DrillConnectionImpl drillConnection = (DrillConnectionImpl) connection;
     DrillClient client = drillConnection.getClient();
-    if (drillConnection.getConfig().disableServerPreparedStatement() || !client.getSupportedMethods().contains(ServerMethod.PREPARED_STATEMENT)) {
+    if (drillConnection.getConfig().isServerPreparedStatementDisabled() || !client.getSupportedMethods().contains(ServerMethod.PREPARED_STATEMENT)) {
       // fallback to client side prepared statement
       return new DrillJdbc41PreparedStatement(drillConnection, h, signature, null, resultSetType, resultSetConcurrency, resultSetHoldability);
     }

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
index 10d4225..b78e93a 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
@@ -421,7 +421,7 @@ class DrillMetaImpl extends MetaImpl {
   @Override
   public MetaResultSet getTables(String catalog, final Pat schemaPattern, final Pat tableNamePattern,
       final List<String> typeList) {
-    if (connection.getConfig().disableServerMetadata() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_TABLES)) {
+    if (connection.getConfig().isServerMetadataDisabled() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_TABLES)) {
       return clientGetTables(catalog, schemaPattern, tableNamePattern, typeList);
     }
 
@@ -964,7 +964,7 @@ class DrillMetaImpl extends MetaImpl {
   @Override
   public MetaResultSet getColumns(String catalog, Pat schemaPattern,
                               Pat tableNamePattern, Pat columnNamePattern) {
-    if (connection.getConfig().disableServerMetadata() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_COLUMNS)) {
+    if (connection.getConfig().isServerMetadataDisabled() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_COLUMNS)) {
       return clientGetColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern);
     }
 
@@ -1023,7 +1023,7 @@ class DrillMetaImpl extends MetaImpl {
    */
   @Override
   public MetaResultSet getSchemas(String catalog, Pat schemaPattern) {
-    if (connection.getConfig().disableServerMetadata() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_SCHEMAS)) {
+    if (connection.getConfig().isServerMetadataDisabled() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_SCHEMAS)) {
       return clientGetSchemas(catalog, schemaPattern);
     }
 
@@ -1070,7 +1070,7 @@ class DrillMetaImpl extends MetaImpl {
    */
   @Override
   public MetaResultSet getCatalogs() {
-    if (connection.getConfig().disableServerMetadata() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_CATALOGS)) {
+    if (connection.getConfig().isServerMetadataDisabled() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_CATALOGS)) {
       return clientGetCatalogs();
     }
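
The same two-part guard (connection flag, then server capability) now appears
in all four entry points; if more methods gain server-side support it could
be factored out. A hypothetical extraction, using only names already present
in this class:

// Hypothetical helper; not part of this change.
private boolean useServerMetadata(ServerMethod method) {
  return !connection.getConfig().isServerMetadataDisabled()
      && connection.getClient().getSupportedMethods().contains(method);
}

// e.g. getCatalogs() would start with:
//   if (!useServerMetadata(ServerMethod.GET_CATALOGS)) {
//     return clientGetCatalogs();
//   }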
 

http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/logical/src/main/java/org/apache/drill/common/expression/fn/CastFunctions.java
----------------------------------------------------------------------
diff --git a/logical/src/main/java/org/apache/drill/common/expression/fn/CastFunctions.java b/logical/src/main/java/org/apache/drill/common/expression/fn/CastFunctions.java
index 43b3b2e..c026b9f 100644
--- a/logical/src/main/java/org/apache/drill/common/expression/fn/CastFunctions.java
+++ b/logical/src/main/java/org/apache/drill/common/expression/fn/CastFunctions.java
@@ -21,6 +21,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+
 import org.apache.drill.common.types.TypeProtos.DataMode;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 
@@ -142,7 +143,7 @@ public class CastFunctions {
       return func;
     }
 
-    throw new RuntimeException(
+    throw new IllegalArgumentException(
       String.format("cast function for type %s is not defined", targetMinorType.name()));
   }
 


[04/27] drill git commit: DRILL-4994: Add back JDBC prepared statement for older servers

Posted by jn...@apache.org.
DRILL-4994: Add back JDBC prepared statement for older servers

When the JDBC client was connected to an older Drill server, it always
attempted to use server-side prepared statements, with no fallback.

With this change, the client checks the server version and falls back to the
previous client-side prepared statement (which is still limited to executing
queries and does not provide metadata).

close #613
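
A rough sketch of the resulting decision, assuming a Version value has
already been extracted from the server handshake (the helper and the 1.8.0
cutoff below are illustrative, not taken from the patch):

    import org.apache.drill.common.Version;

    // Sketch only, not the actual factory logic: gate server-side prepare
    // on the server's advertised version; older servers keep the legacy
    // client-side path. The 1.8.0 cutoff is illustrative.
    static boolean useServerPrepare(Version serverVersion) {
      Version minPrepare = new Version("1.8.0", 1, 8, 0, 0, "");
      return serverVersion != null && serverVersion.compareTo(minPrepare) >= 0;
    }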


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/16aa0810
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/16aa0810
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/16aa0810

Branch: refs/heads/master
Commit: 16aa0810c6b5ab7466b3b7eeaf8652b765da0f89
Parents: ab60855
Author: Laurent Goujon <la...@dremio.com>
Authored: Fri Nov 4 17:36:42 2016 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:30 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/drill/common/Version.java   | 157 ++++
 .../org/apache/drill/common/TestVersion.java    | 103 +++
 contrib/native/client/src/protobuf/User.pb.cc   | 415 ++++++----
 contrib/native/client/src/protobuf/User.pb.h    | 159 +++-
 .../client/src/protobuf/UserBitShared.pb.cc     | 200 +++--
 .../client/src/protobuf/UserBitShared.pb.h      |  66 +-
 .../codegen/templates/DrillVersionInfo.java     |  44 +-
 .../apache/drill/exec/client/DrillClient.java   |  50 +-
 .../apache/drill/exec/client/ServerMethod.java  | 144 ++++
 .../apache/drill/exec/rpc/user/UserClient.java  |  45 +-
 .../drill/exec/rpc/user/UserRpcConfig.java      |  18 +-
 .../drill/exec/rpc/user/UserRpcUtils.java       |  16 +
 .../apache/drill/exec/rpc/user/UserServer.java  |   3 +-
 .../drill/jdbc/DrillConnectionConfig.java       |   4 +
 .../org/apache/drill/jdbc/impl/DrillCursor.java |  11 +-
 .../jdbc/impl/DrillDatabaseMetaDataImpl.java    |  35 +-
 .../drill/jdbc/impl/DrillJdbc41Factory.java     |  26 +-
 .../jdbc/impl/DrillPreparedStatementImpl.java   |   6 +-
 .../drill/jdbc/LegacyPreparedStatementTest.java | 130 ++++
 .../drill/exec/proto/SchemaUserProtos.java      |  21 +
 .../org/apache/drill/exec/proto/UserProtos.java | 751 +++++++++++++++----
 .../exec/proto/beans/BitToUserHandshake.java    |  30 +
 .../exec/proto/beans/RpcEndpointInfos.java      |  44 ++
 protocol/src/main/protobuf/User.proto           |  15 +-
 24 files changed, 2108 insertions(+), 385 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/common/src/main/java/org/apache/drill/common/Version.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/Version.java b/common/src/main/java/org/apache/drill/common/Version.java
new file mode 100644
index 0000000..fdd9f84
--- /dev/null
+++ b/common/src/main/java/org/apache/drill/common/Version.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.common;
+
+import java.util.Comparator;
+import java.util.Locale;
+import java.util.Objects;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ComparisonChain;
+
+/**
+ * Encapsulates version information and provides ordering.
+ */
+public final class Version implements Comparable<Version> {
+  private final String version;
+  private final int major;
+  private final int minor;
+  private final int patch;
+  private final int buildNumber;
+  private final String qualifier;
+  private final String lcQualifier; // lower-case qualifier for comparison
+
+  public Version(String version, int major, int minor, int patch, int buildNumber,
+      String qualifier) {
+    this.version = version;
+    this.major = major;
+    this.minor = minor;
+    this.patch = patch;
+    this.buildNumber = buildNumber;
+    this.qualifier = qualifier;
+    this.lcQualifier = qualifier.toLowerCase(Locale.ENGLISH);
+  }
+
+  /**
+   * Get the version string
+   *
+   * @return the version number as x.y.z
+   */
+  public String getVersion() {
+    return version;
+  }
+
+  /**
+   * Get the major version
+   *
+   * @return x if assuming the version number is x.y.z
+   */
+  public int getMajorVersion() {
+    return major;
+  }
+
+  /**
+   * Get the minor version
+   *
+   * @return y if assuming the version number is x.y.z
+   */
+  public int getMinorVersion() {
+    return minor;
+  }
+
+  /**
+   * Get the patch version
+   *
+   * @return z if assuming the version number is x.y.z(-suffix)
+   */
+  public int getPatchVersion() {
+    return patch;
+  }
+
+  /**
+   * Get the build number
+   *
+   * @return b if assuming the version number is x.y.z(.b)(-suffix)
+   */
+  public int getBuildNumber() {
+    return buildNumber;
+  }
+
+  /**
+   * Get the version qualifier
+   *
+   * @return suffix if assuming the version number is x.y.z(.b)(-suffix)
+   */
+  public String getQualifier() {
+    return qualifier;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(major, minor, patch, buildNumber, lcQualifier);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof Version)) {
+      return false;
+    }
+    Version dvi = (Version) obj;
+    return this.major == dvi.major
+        && this.minor == dvi.minor
+        && this.patch == dvi.patch
+        && this.buildNumber == dvi.buildNumber
+        && Objects.equals(this.lcQualifier, dvi.lcQualifier);
+  }
+
+  @Override
+  public String toString() {
+    return String.format("Version; %s", version);
+  }
+
+  private static final Comparator<String> QUALIFIER_COMPARATOR = new Comparator<String>() {
+    @Override public int compare(String q1, String q2) {
+      if (q1.equals(q2)) {
+        return 0;
+      }
+
+      if ("snapshot".equals(q1)) {
+        return -1;
+      }
+
+      if ("snapshot".equals(q2)) {
+        return 1;
+      }
+
+      return q1.compareTo(q2);
+    }
+  };
+
+  @Override
+  public int compareTo(Version o) {
+    Preconditions.checkNotNull(o);
+    return ComparisonChain.start()
+        .compare(this.major, o.major)
+        .compare(this.minor, o.minor)
+        .compare(this.patch, o.patch)
+        .compare(this.buildNumber, o.buildNumber)
+        .compare(this.lcQualifier, o.lcQualifier, QUALIFIER_COMPARATOR)
+        .result();
+  }
+}

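The QUALIFIER_COMPARATOR orders the "snapshot" qualifier before any other
qualifier, including the empty qualifier of a release, and qualifiers are
lower-cased once in the constructor, so comparison is case-insensitive. A
short illustration using only the API defined above:

    Version snapshot = new Version("1.2.3-SNAPSHOT", 1, 2, 3, 0, "SNAPSHOT");
    Version release  = new Version("1.2.3", 1, 2, 3, 0, "");
    assert snapshot.compareTo(release) < 0;   // a snapshot precedes its release
    assert snapshot.compareTo(
        new Version("1.2.3-snapshot", 1, 2, 3, 0, "snapshot")) == 0;  // case-insensitive
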
http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/common/src/test/java/org/apache/drill/common/TestVersion.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/drill/common/TestVersion.java b/common/src/test/java/org/apache/drill/common/TestVersion.java
new file mode 100644
index 0000000..cabacb3
--- /dev/null
+++ b/common/src/test/java/org/apache/drill/common/TestVersion.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Test;
+
+/**
+ * Test class for {@code Version}.
+ */
+public class TestVersion {
+
+  @Test
+  public void testSnapshotVersion() {
+    Version version = new Version("1.2.3-SNAPSHOT", 1, 2, 3, 0, "SNAPSHOT");
+
+    assertEquals("1.2.3-SNAPSHOT", version.getVersion());
+    assertEquals(1, version.getMajorVersion());
+    assertEquals(2, version.getMinorVersion());
+    assertEquals(3, version.getPatchVersion());
+    assertEquals(0, version.getBuildNumber());
+    assertEquals("SNAPSHOT", version.getQualifier());
+  }
+
+  @Test
+  public void testReleaseVersion() {
+    Version version = new Version("2.1.4", 2, 1, 4, 0, "");
+
+    assertEquals("2.1.4", version.getVersion());
+    assertEquals(2, version.getMajorVersion());
+    assertEquals(1, version.getMinorVersion());
+    assertEquals(4, version.getPatchVersion());
+    assertEquals(0, version.getBuildNumber());
+    assertEquals("", version.getQualifier());
+  }
+
+  @Test
+  public void testBuildNumberVersion() {
+    Version version = new Version("3.1.5-2-BUGFIX", 3, 1, 5, 2, "BUGFIX");
+
+    assertEquals("3.1.5-2-BUGFIX", version.getVersion());
+    assertEquals(3, version.getMajorVersion());
+    assertEquals(1, version.getMinorVersion());
+    assertEquals(5, version.getPatchVersion());
+    assertEquals(2, version.getBuildNumber());
+    assertEquals("BUGFIX", version.getQualifier());
+  }
+
+  private final Version v1 = new Version("1.2.3-SNAPSHOT", 1, 2, 3, 0, "SNAPSHOT");
+  private final Version v2 = new Version("2.1.4", 2, 1, 4, 0, "");
+  private final Version v3 = new Version("3.1.5-2-BUGFIX", 3, 1, 5, 2, "BUGFIX");
+  private final Version v4 = new Version("1.2.3-snapshot", 1, 2, 3, 0, "snapshot");
+  private final Version v5 = new Version("1.2.3", 1, 2, 3, 0, "");
+
+  @Test
+  public void testEquals() {
+    assertEquals(v1, v1);
+    assertNotEquals(v1, v2);
+    assertNotEquals(v1, v3);
+    assertEquals(v1, v4);
+    assertNotEquals(v1, v5);
+    assertNotEquals(v1, null);
+    assertNotEquals(v1, new Object());
+  }
+
+  @Test
+  public void testHashcode() {
+    assertEquals(v1.hashCode(), v1.hashCode());
+    assertNotEquals(v1.hashCode(), v2.hashCode());
+    assertNotEquals(v1.hashCode(), v3.hashCode());
+    assertEquals(v1.hashCode(), v4.hashCode());
+    assertNotEquals(v1.hashCode(), v5.hashCode());
+  }
+
+  @Test
+  public void testCompareTo() {
+    assertTrue(v1.compareTo(v1) == 0);
+    assertTrue(v1.compareTo(v2) < 0);
+    assertTrue(v1.compareTo(v3) < 0);
+    assertTrue(v1.compareTo(v4) == 0);
+    assertTrue(v1.compareTo(v5) < 0);
+    assertTrue(v1.compareTo(new Version("1.2", 1, 2, 0, 0, "")) > 0);
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/contrib/native/client/src/protobuf/User.pb.cc
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/User.pb.cc b/contrib/native/client/src/protobuf/User.pb.cc
index a292b38..aee70b8 100644
--- a/contrib/native/client/src/protobuf/User.pb.cc
+++ b/contrib/native/client/src/protobuf/User.pb.cc
@@ -151,13 +151,15 @@ void protobuf_AssignDesc_User_2eproto() {
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(UserProperties));
   RpcEndpointInfos_descriptor_ = file->message_type(2);
-  static const int RpcEndpointInfos_offsets_[6] = {
+  static const int RpcEndpointInfos_offsets_[8] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, name_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, version_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, majorversion_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, minorversion_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, patchversion_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, application_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, buildnumber_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RpcEndpointInfos, versionqualifier_),
   };
   RpcEndpointInfos_reflection_ =
     new ::google::protobuf::internal::GeneratedMessageReflection(
@@ -245,13 +247,14 @@ void protobuf_AssignDesc_User_2eproto() {
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(QueryPlanFragments));
   BitToUserHandshake_descriptor_ = file->message_type(7);
-  static const int BitToUserHandshake_offsets_[6] = {
+  static const int BitToUserHandshake_offsets_[7] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BitToUserHandshake, rpc_version_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BitToUserHandshake, status_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BitToUserHandshake, errorid_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BitToUserHandshake, errormessage_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BitToUserHandshake, server_infos_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BitToUserHandshake, authenticationmechanisms_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BitToUserHandshake, supported_methods_),
   };
   BitToUserHandshake_reflection_ =
     new ::google::protobuf::internal::GeneratedMessageReflection(
@@ -766,136 +769,138 @@ void protobuf_AddDesc_User_2eproto() {
     "tControl.proto\032\025ExecutionProtos.proto\"&\n"
     "\010Property\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"9\n"
     "\016UserProperties\022\'\n\nproperties\030\001 \003(\0132\023.ex"
-    "ec.user.Property\"\210\001\n\020RpcEndpointInfos\022\014\n"
+    "ec.user.Property\"\267\001\n\020RpcEndpointInfos\022\014\n"
     "\004name\030\001 \001(\t\022\017\n\007version\030\002 \001(\t\022\024\n\014majorVer"
     "sion\030\003 \001(\r\022\024\n\014minorVersion\030\004 \001(\r\022\024\n\014patc"
-    "hVersion\030\005 \001(\r\022\023\n\013application\030\006 \001(\t\"\375\002\n\022"
-    "UserToBitHandshake\022.\n\007channel\030\001 \001(\0162\027.ex"
-    "ec.shared.RpcChannel:\004USER\022\031\n\021support_li"
-    "stening\030\002 \001(\010\022\023\n\013rpc_version\030\003 \001(\005\0221\n\013cr"
-    "edentials\030\004 \001(\0132\034.exec.shared.UserCreden"
-    "tials\022-\n\nproperties\030\005 \001(\0132\031.exec.user.Us"
-    "erProperties\022$\n\025support_complex_types\030\006 "
-    "\001(\010:\005false\022\036\n\017support_timeout\030\007 \001(\010:\005fal"
-    "se\0221\n\014client_infos\030\010 \001(\0132\033.exec.user.Rpc"
-    "EndpointInfos\022,\n\014sasl_support\030\t \001(\0162\026.ex"
-    "ec.user.SaslSupport\"S\n\016RequestResults\022&\n"
-    "\010query_id\030\001 \001(\0132\024.exec.shared.QueryId\022\031\n"
-    "\021maximum_responses\030\002 \001(\005\"g\n\025GetQueryPlan"
-    "Fragments\022\r\n\005query\030\001 \002(\t\022$\n\004type\030\002 \001(\0162\026"
-    ".exec.shared.QueryType\022\031\n\nsplit_plan\030\003 \001"
-    "(\010:\005false\"\316\001\n\022QueryPlanFragments\0223\n\006stat"
-    "us\030\001 \002(\0162#.exec.shared.QueryResult.Query"
-    "State\022&\n\010query_id\030\002 \001(\0132\024.exec.shared.Qu"
-    "eryId\0221\n\tfragments\030\003 \003(\0132\036.exec.bit.cont"
-    "rol.PlanFragment\022(\n\005error\030\004 \001(\0132\031.exec.s"
-    "hared.DrillPBError\"\321\001\n\022BitToUserHandshak"
-    "e\022\023\n\013rpc_version\030\002 \001(\005\022*\n\006status\030\003 \001(\0162\032"
-    ".exec.user.HandshakeStatus\022\017\n\007errorId\030\004 "
-    "\001(\t\022\024\n\014errorMessage\030\005 \001(\t\0221\n\014server_info"
-    "s\030\006 \001(\0132\033.exec.user.RpcEndpointInfos\022 \n\030"
-    "authenticationMechanisms\030\007 \003(\t\"-\n\nLikeFi"
-    "lter\022\017\n\007pattern\030\001 \001(\t\022\016\n\006escape\030\002 \001(\t\"D\n"
-    "\016GetCatalogsReq\0222\n\023catalog_name_filter\030\001"
-    " \001(\0132\025.exec.user.LikeFilter\"M\n\017CatalogMe"
-    "tadata\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013descript"
-    "ion\030\002 \001(\t\022\017\n\007connect\030\003 \001(\t\"\223\001\n\017GetCatalo"
-    "gsResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Reque"
-    "stStatus\022,\n\010catalogs\030\002 \003(\0132\032.exec.user.C"
-    "atalogMetadata\022(\n\005error\030\003 \001(\0132\031.exec.sha"
-    "red.DrillPBError\"v\n\rGetSchemasReq\0222\n\023cat"
-    "alog_name_filter\030\001 \001(\0132\025.exec.user.LikeF"
-    "ilter\0221\n\022schema_name_filter\030\002 \001(\0132\025.exec"
-    ".user.LikeFilter\"i\n\016SchemaMetadata\022\024\n\014ca"
-    "talog_name\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\r\n"
-    "\005owner\030\003 \001(\t\022\014\n\004type\030\004 \001(\t\022\017\n\007mutable\030\005 "
-    "\001(\t\"\220\001\n\016GetSchemasResp\022(\n\006status\030\001 \001(\0162\030"
-    ".exec.user.RequestStatus\022*\n\007schemas\030\002 \003("
-    "\0132\031.exec.user.SchemaMetadata\022(\n\005error\030\003 "
-    "\001(\0132\031.exec.shared.DrillPBError\"\302\001\n\014GetTa"
-    "blesReq\0222\n\023catalog_name_filter\030\001 \001(\0132\025.e"
+    "hVersion\030\005 \001(\r\022\023\n\013application\030\006 \001(\t\022\023\n\013b"
+    "uildNumber\030\007 \001(\r\022\030\n\020versionQualifier\030\010 \001"
+    "(\t\"\375\002\n\022UserToBitHandshake\022.\n\007channel\030\001 \001"
+    "(\0162\027.exec.shared.RpcChannel:\004USER\022\031\n\021sup"
+    "port_listening\030\002 \001(\010\022\023\n\013rpc_version\030\003 \001("
+    "\005\0221\n\013credentials\030\004 \001(\0132\034.exec.shared.Use"
+    "rCredentials\022-\n\nproperties\030\005 \001(\0132\031.exec."
+    "user.UserProperties\022$\n\025support_complex_t"
+    "ypes\030\006 \001(\010:\005false\022\036\n\017support_timeout\030\007 \001"
+    "(\010:\005false\0221\n\014client_infos\030\010 \001(\0132\033.exec.u"
+    "ser.RpcEndpointInfos\022,\n\014sasl_support\030\t \001"
+    "(\0162\026.exec.user.SaslSupport\"S\n\016RequestRes"
+    "ults\022&\n\010query_id\030\001 \001(\0132\024.exec.shared.Que"
+    "ryId\022\031\n\021maximum_responses\030\002 \001(\005\"g\n\025GetQu"
+    "eryPlanFragments\022\r\n\005query\030\001 \002(\t\022$\n\004type\030"
+    "\002 \001(\0162\026.exec.shared.QueryType\022\031\n\nsplit_p"
+    "lan\030\003 \001(\010:\005false\"\316\001\n\022QueryPlanFragments\022"
+    "3\n\006status\030\001 \002(\0162#.exec.shared.QueryResul"
+    "t.QueryState\022&\n\010query_id\030\002 \001(\0132\024.exec.sh"
+    "ared.QueryId\0221\n\tfragments\030\003 \003(\0132\036.exec.b"
+    "it.control.PlanFragment\022(\n\005error\030\004 \001(\0132\031"
+    ".exec.shared.DrillPBError\"\200\002\n\022BitToUserH"
+    "andshake\022\023\n\013rpc_version\030\002 \001(\005\022*\n\006status\030"
+    "\003 \001(\0162\032.exec.user.HandshakeStatus\022\017\n\007err"
+    "orId\030\004 \001(\t\022\024\n\014errorMessage\030\005 \001(\t\0221\n\014serv"
+    "er_infos\030\006 \001(\0132\033.exec.user.RpcEndpointIn"
+    "fos\022 \n\030authenticationMechanisms\030\007 \003(\t\022-\n"
+    "\021supported_methods\030\010 \003(\0162\022.exec.user.Rpc"
+    "Type\"-\n\nLikeFilter\022\017\n\007pattern\030\001 \001(\t\022\016\n\006e"
+    "scape\030\002 \001(\t\"D\n\016GetCatalogsReq\0222\n\023catalog"
+    "_name_filter\030\001 \001(\0132\025.exec.user.LikeFilte"
+    "r\"M\n\017CatalogMetadata\022\024\n\014catalog_name\030\001 \001"
+    "(\t\022\023\n\013description\030\002 \001(\t\022\017\n\007connect\030\003 \001(\t"
+    "\"\223\001\n\017GetCatalogsResp\022(\n\006status\030\001 \001(\0162\030.e"
+    "xec.user.RequestStatus\022,\n\010catalogs\030\002 \003(\013"
+    "2\032.exec.user.CatalogMetadata\022(\n\005error\030\003 "
+    "\001(\0132\031.exec.shared.DrillPBError\"v\n\rGetSch"
+    "emasReq\0222\n\023catalog_name_filter\030\001 \001(\0132\025.e"
     "xec.user.LikeFilter\0221\n\022schema_name_filte"
-    "r\030\002 \001(\0132\025.exec.user.LikeFilter\0220\n\021table_"
-    "name_filter\030\003 \001(\0132\025.exec.user.LikeFilter"
-    "\022\031\n\021table_type_filter\030\004 \003(\t\"\\\n\rTableMeta"
-    "data\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema_nam"
-    "e\030\002 \001(\t\022\022\n\ntable_name\030\003 \001(\t\022\014\n\004type\030\004 \001("
-    "\t\"\215\001\n\rGetTablesResp\022(\n\006status\030\001 \001(\0162\030.ex"
-    "ec.user.RequestStatus\022(\n\006tables\030\002 \003(\0132\030."
-    "exec.user.TableMetadata\022(\n\005error\030\003 \001(\0132\031"
-    ".exec.shared.DrillPBError\"\333\001\n\rGetColumns"
-    "Req\0222\n\023catalog_name_filter\030\001 \001(\0132\025.exec."
-    "user.LikeFilter\0221\n\022schema_name_filter\030\002 "
-    "\001(\0132\025.exec.user.LikeFilter\0220\n\021table_name"
-    "_filter\030\003 \001(\0132\025.exec.user.LikeFilter\0221\n\022"
-    "column_name_filter\030\004 \001(\0132\025.exec.user.Lik"
-    "eFilter\"\251\003\n\016ColumnMetadata\022\024\n\014catalog_na"
-    "me\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntable_n"
-    "ame\030\003 \001(\t\022\023\n\013column_name\030\004 \001(\t\022\030\n\020ordina"
-    "l_position\030\005 \001(\005\022\025\n\rdefault_value\030\006 \001(\t\022"
-    "\023\n\013is_nullable\030\007 \001(\010\022\021\n\tdata_type\030\010 \001(\t\022"
-    "\027\n\017char_max_length\030\t \001(\005\022\031\n\021char_octet_l"
-    "ength\030\n \001(\005\022\031\n\021numeric_precision\030\013 \001(\005\022\037"
-    "\n\027numeric_precision_radix\030\014 \001(\005\022\025\n\rnumer"
-    "ic_scale\030\r \001(\005\022\033\n\023date_time_precision\030\016 "
-    "\001(\005\022\025\n\rinterval_type\030\017 \001(\t\022\032\n\022interval_p"
-    "recision\030\020 \001(\005\022\023\n\013column_size\030\021 \001(\005\"\220\001\n\016"
-    "GetColumnsResp\022(\n\006status\030\001 \001(\0162\030.exec.us"
-    "er.RequestStatus\022*\n\007columns\030\002 \003(\0132\031.exec"
-    ".user.ColumnMetadata\022(\n\005error\030\003 \001(\0132\031.ex"
-    "ec.shared.DrillPBError\"/\n\032CreatePrepared"
-    "StatementReq\022\021\n\tsql_query\030\001 \001(\t\"\326\003\n\024Resu"
-    "ltColumnMetadata\022\024\n\014catalog_name\030\001 \001(\t\022\023"
-    "\n\013schema_name\030\002 \001(\t\022\022\n\ntable_name\030\003 \001(\t\022"
-    "\023\n\013column_name\030\004 \001(\t\022\r\n\005label\030\005 \001(\t\022\021\n\td"
-    "ata_type\030\006 \001(\t\022\023\n\013is_nullable\030\007 \001(\010\022\021\n\tp"
-    "recision\030\010 \001(\005\022\r\n\005scale\030\t \001(\005\022\016\n\006signed\030"
-    "\n \001(\010\022\024\n\014display_size\030\013 \001(\005\022\022\n\nis_aliase"
-    "d\030\014 \001(\010\0225\n\rsearchability\030\r \001(\0162\036.exec.us"
-    "er.ColumnSearchability\0223\n\014updatability\030\016"
-    " \001(\0162\035.exec.user.ColumnUpdatability\022\026\n\016a"
-    "uto_increment\030\017 \001(\010\022\030\n\020case_sensitivity\030"
-    "\020 \001(\010\022\020\n\010sortable\030\021 \001(\010\022\022\n\nclass_name\030\022 "
-    "\001(\t\022\023\n\013is_currency\030\024 \001(\010\".\n\027PreparedStat"
-    "ementHandle\022\023\n\013server_info\030\001 \001(\014\"\200\001\n\021Pre"
-    "paredStatement\0220\n\007columns\030\001 \003(\0132\037.exec.u"
-    "ser.ResultColumnMetadata\0229\n\rserver_handl"
-    "e\030\002 \001(\0132\".exec.user.PreparedStatementHan"
-    "dle\"\253\001\n\033CreatePreparedStatementResp\022(\n\006s"
-    "tatus\030\001 \001(\0162\030.exec.user.RequestStatus\0228\n"
-    "\022prepared_statement\030\002 \001(\0132\034.exec.user.Pr"
-    "eparedStatement\022(\n\005error\030\003 \001(\0132\031.exec.sh"
-    "ared.DrillPBError\"\353\001\n\010RunQuery\0221\n\014result"
-    "s_mode\030\001 \001(\0162\033.exec.user.QueryResultsMod"
-    "e\022$\n\004type\030\002 \001(\0162\026.exec.shared.QueryType\022"
-    "\014\n\004plan\030\003 \001(\t\0221\n\tfragments\030\004 \003(\0132\036.exec."
-    "bit.control.PlanFragment\022E\n\031prepared_sta"
-    "tement_handle\030\005 \001(\0132\".exec.user.Prepared"
-    "StatementHandle*\332\003\n\007RpcType\022\r\n\tHANDSHAKE"
-    "\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002\022\r\n\tRUN_QUERY\020\003\022"
-    "\020\n\014CANCEL_QUERY\020\004\022\023\n\017REQUEST_RESULTS\020\005\022\027"
-    "\n\023RESUME_PAUSED_QUERY\020\013\022\034\n\030GET_QUERY_PLA"
-    "N_FRAGMENTS\020\014\022\020\n\014GET_CATALOGS\020\016\022\017\n\013GET_S"
-    "CHEMAS\020\017\022\016\n\nGET_TABLES\020\020\022\017\n\013GET_COLUMNS\020"
-    "\021\022\035\n\031CREATE_PREPARED_STATEMENT\020\026\022\016\n\nQUER"
-    "Y_DATA\020\006\022\020\n\014QUERY_HANDLE\020\007\022\030\n\024QUERY_PLAN"
-    "_FRAGMENTS\020\r\022\014\n\010CATALOGS\020\022\022\013\n\007SCHEMAS\020\023\022"
-    "\n\n\006TABLES\020\024\022\013\n\007COLUMNS\020\025\022\026\n\022PREPARED_STA"
-    "TEMENT\020\027\022\026\n\022REQ_META_FUNCTIONS\020\010\022\026\n\022RESP"
-    "_FUNCTION_LIST\020\t\022\020\n\014QUERY_RESULT\020\n\022\020\n\014SA"
-    "SL_MESSAGE\020\030*6\n\013SaslSupport\022\030\n\024UNKNOWN_S"
-    "ASL_SUPPORT\020\000\022\r\n\tSASL_AUTH\020\001*#\n\020QueryRes"
-    "ultsMode\022\017\n\013STREAM_FULL\020\001*q\n\017HandshakeSt"
-    "atus\022\013\n\007SUCCESS\020\001\022\030\n\024RPC_VERSION_MISMATC"
-    "H\020\002\022\017\n\013AUTH_FAILED\020\003\022\023\n\017UNKNOWN_FAILURE\020"
-    "\004\022\021\n\rAUTH_REQUIRED\020\005*D\n\rRequestStatus\022\022\n"
-    "\016UNKNOWN_STATUS\020\000\022\006\n\002OK\020\001\022\n\n\006FAILED\020\002\022\013\n"
-    "\007TIMEOUT\020\003*Y\n\023ColumnSearchability\022\031\n\025UNK"
-    "NOWN_SEARCHABILITY\020\000\022\010\n\004NONE\020\001\022\010\n\004CHAR\020\002"
-    "\022\n\n\006NUMBER\020\003\022\007\n\003ALL\020\004*K\n\022ColumnUpdatabil"
-    "ity\022\030\n\024UNKNOWN_UPDATABILITY\020\000\022\r\n\tREAD_ON"
-    "LY\020\001\022\014\n\010WRITABLE\020\002B+\n\033org.apache.drill.e"
-    "xec.protoB\nUserProtosH\001", 5383);
+    "r\030\002 \001(\0132\025.exec.user.LikeFilter\"i\n\016Schema"
+    "Metadata\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema"
+    "_name\030\002 \001(\t\022\r\n\005owner\030\003 \001(\t\022\014\n\004type\030\004 \001(\t"
+    "\022\017\n\007mutable\030\005 \001(\t\"\220\001\n\016GetSchemasResp\022(\n\006"
+    "status\030\001 \001(\0162\030.exec.user.RequestStatus\022*"
+    "\n\007schemas\030\002 \003(\0132\031.exec.user.SchemaMetada"
+    "ta\022(\n\005error\030\003 \001(\0132\031.exec.shared.DrillPBE"
+    "rror\"\302\001\n\014GetTablesReq\0222\n\023catalog_name_fi"
+    "lter\030\001 \001(\0132\025.exec.user.LikeFilter\0221\n\022sch"
+    "ema_name_filter\030\002 \001(\0132\025.exec.user.LikeFi"
+    "lter\0220\n\021table_name_filter\030\003 \001(\0132\025.exec.u"
+    "ser.LikeFilter\022\031\n\021table_type_filter\030\004 \003("
+    "\t\"\\\n\rTableMetadata\022\024\n\014catalog_name\030\001 \001(\t"
+    "\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntable_name\030\003 \001("
+    "\t\022\014\n\004type\030\004 \001(\t\"\215\001\n\rGetTablesResp\022(\n\006sta"
+    "tus\030\001 \001(\0162\030.exec.user.RequestStatus\022(\n\006t"
+    "ables\030\002 \003(\0132\030.exec.user.TableMetadata\022(\n"
+    "\005error\030\003 \001(\0132\031.exec.shared.DrillPBError\""
+    "\333\001\n\rGetColumnsReq\0222\n\023catalog_name_filter"
+    "\030\001 \001(\0132\025.exec.user.LikeFilter\0221\n\022schema_"
+    "name_filter\030\002 \001(\0132\025.exec.user.LikeFilter"
+    "\0220\n\021table_name_filter\030\003 \001(\0132\025.exec.user."
+    "LikeFilter\0221\n\022column_name_filter\030\004 \001(\0132\025"
+    ".exec.user.LikeFilter\"\251\003\n\016ColumnMetadata"
+    "\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema_name\030\002 "
+    "\001(\t\022\022\n\ntable_name\030\003 \001(\t\022\023\n\013column_name\030\004"
+    " \001(\t\022\030\n\020ordinal_position\030\005 \001(\005\022\025\n\rdefaul"
+    "t_value\030\006 \001(\t\022\023\n\013is_nullable\030\007 \001(\010\022\021\n\tda"
+    "ta_type\030\010 \001(\t\022\027\n\017char_max_length\030\t \001(\005\022\031"
+    "\n\021char_octet_length\030\n \001(\005\022\031\n\021numeric_pre"
+    "cision\030\013 \001(\005\022\037\n\027numeric_precision_radix\030"
+    "\014 \001(\005\022\025\n\rnumeric_scale\030\r \001(\005\022\033\n\023date_tim"
+    "e_precision\030\016 \001(\005\022\025\n\rinterval_type\030\017 \001(\t"
+    "\022\032\n\022interval_precision\030\020 \001(\005\022\023\n\013column_s"
+    "ize\030\021 \001(\005\"\220\001\n\016GetColumnsResp\022(\n\006status\030\001"
+    " \001(\0162\030.exec.user.RequestStatus\022*\n\007column"
+    "s\030\002 \003(\0132\031.exec.user.ColumnMetadata\022(\n\005er"
+    "ror\030\003 \001(\0132\031.exec.shared.DrillPBError\"/\n\032"
+    "CreatePreparedStatementReq\022\021\n\tsql_query\030"
+    "\001 \001(\t\"\326\003\n\024ResultColumnMetadata\022\024\n\014catalo"
+    "g_name\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntab"
+    "le_name\030\003 \001(\t\022\023\n\013column_name\030\004 \001(\t\022\r\n\005la"
+    "bel\030\005 \001(\t\022\021\n\tdata_type\030\006 \001(\t\022\023\n\013is_nulla"
+    "ble\030\007 \001(\010\022\021\n\tprecision\030\010 \001(\005\022\r\n\005scale\030\t "
+    "\001(\005\022\016\n\006signed\030\n \001(\010\022\024\n\014display_size\030\013 \001("
+    "\005\022\022\n\nis_aliased\030\014 \001(\010\0225\n\rsearchability\030\r"
+    " \001(\0162\036.exec.user.ColumnSearchability\0223\n\014"
+    "updatability\030\016 \001(\0162\035.exec.user.ColumnUpd"
+    "atability\022\026\n\016auto_increment\030\017 \001(\010\022\030\n\020cas"
+    "e_sensitivity\030\020 \001(\010\022\020\n\010sortable\030\021 \001(\010\022\022\n"
+    "\nclass_name\030\022 \001(\t\022\023\n\013is_currency\030\024 \001(\010\"."
+    "\n\027PreparedStatementHandle\022\023\n\013server_info"
+    "\030\001 \001(\014\"\200\001\n\021PreparedStatement\0220\n\007columns\030"
+    "\001 \003(\0132\037.exec.user.ResultColumnMetadata\0229"
+    "\n\rserver_handle\030\002 \001(\0132\".exec.user.Prepar"
+    "edStatementHandle\"\253\001\n\033CreatePreparedStat"
+    "ementResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Re"
+    "questStatus\0228\n\022prepared_statement\030\002 \001(\0132"
+    "\034.exec.user.PreparedStatement\022(\n\005error\030\003"
+    " \001(\0132\031.exec.shared.DrillPBError\"\353\001\n\010RunQ"
+    "uery\0221\n\014results_mode\030\001 \001(\0162\033.exec.user.Q"
+    "ueryResultsMode\022$\n\004type\030\002 \001(\0162\026.exec.sha"
+    "red.QueryType\022\014\n\004plan\030\003 \001(\t\0221\n\tfragments"
+    "\030\004 \003(\0132\036.exec.bit.control.PlanFragment\022E"
+    "\n\031prepared_statement_handle\030\005 \001(\0132\".exec"
+    ".user.PreparedStatementHandle*\332\003\n\007RpcTyp"
+    "e\022\r\n\tHANDSHAKE\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002\022\r"
+    "\n\tRUN_QUERY\020\003\022\020\n\014CANCEL_QUERY\020\004\022\023\n\017REQUE"
+    "ST_RESULTS\020\005\022\027\n\023RESUME_PAUSED_QUERY\020\013\022\034\n"
+    "\030GET_QUERY_PLAN_FRAGMENTS\020\014\022\020\n\014GET_CATAL"
+    "OGS\020\016\022\017\n\013GET_SCHEMAS\020\017\022\016\n\nGET_TABLES\020\020\022\017"
+    "\n\013GET_COLUMNS\020\021\022\035\n\031CREATE_PREPARED_STATE"
+    "MENT\020\026\022\016\n\nQUERY_DATA\020\006\022\020\n\014QUERY_HANDLE\020\007"
+    "\022\030\n\024QUERY_PLAN_FRAGMENTS\020\r\022\014\n\010CATALOGS\020\022"
+    "\022\013\n\007SCHEMAS\020\023\022\n\n\006TABLES\020\024\022\013\n\007COLUMNS\020\025\022\026"
+    "\n\022PREPARED_STATEMENT\020\027\022\026\n\022REQ_META_FUNCT"
+    "IONS\020\010\022\026\n\022RESP_FUNCTION_LIST\020\t\022\020\n\014QUERY_"
+    "RESULT\020\n\022\020\n\014SASL_MESSAGE\020\030*6\n\013SaslSuppor"
+    "t\022\030\n\024UNKNOWN_SASL_SUPPORT\020\000\022\r\n\tSASL_AUTH"
+    "\020\001*#\n\020QueryResultsMode\022\017\n\013STREAM_FULL\020\001*"
+    "q\n\017HandshakeStatus\022\013\n\007SUCCESS\020\001\022\030\n\024RPC_V"
+    "ERSION_MISMATCH\020\002\022\017\n\013AUTH_FAILED\020\003\022\023\n\017UN"
+    "KNOWN_FAILURE\020\004\022\021\n\rAUTH_REQUIRED\020\005*D\n\rRe"
+    "questStatus\022\022\n\016UNKNOWN_STATUS\020\000\022\006\n\002OK\020\001\022"
+    "\n\n\006FAILED\020\002\022\013\n\007TIMEOUT\020\003*Y\n\023ColumnSearch"
+    "ability\022\031\n\025UNKNOWN_SEARCHABILITY\020\000\022\010\n\004NO"
+    "NE\020\001\022\010\n\004CHAR\020\002\022\n\n\006NUMBER\020\003\022\007\n\003ALL\020\004*K\n\022C"
+    "olumnUpdatability\022\030\n\024UNKNOWN_UPDATABILIT"
+    "Y\020\000\022\r\n\tREAD_ONLY\020\001\022\014\n\010WRITABLE\020\002B+\n\033org."
+    "apache.drill.exec.protoB\nUserProtosH\001", 5477);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "User.proto", &protobuf_RegisterTypes);
   Property::default_instance_ = new Property();
@@ -1589,6 +1594,8 @@ const int RpcEndpointInfos::kMajorVersionFieldNumber;
 const int RpcEndpointInfos::kMinorVersionFieldNumber;
 const int RpcEndpointInfos::kPatchVersionFieldNumber;
 const int RpcEndpointInfos::kApplicationFieldNumber;
+const int RpcEndpointInfos::kBuildNumberFieldNumber;
+const int RpcEndpointInfos::kVersionQualifierFieldNumber;
 #endif  // !_MSC_VER
 
 RpcEndpointInfos::RpcEndpointInfos()
@@ -1613,6 +1620,8 @@ void RpcEndpointInfos::SharedCtor() {
   minorversion_ = 0u;
   patchversion_ = 0u;
   application_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  buildnumber_ = 0u;
+  versionqualifier_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
 }
 
@@ -1630,6 +1639,9 @@ void RpcEndpointInfos::SharedDtor() {
   if (application_ != &::google::protobuf::internal::kEmptyString) {
     delete application_;
   }
+  if (versionqualifier_ != &::google::protobuf::internal::kEmptyString) {
+    delete versionqualifier_;
+  }
   if (this != default_instance_) {
   }
 }
@@ -1675,6 +1687,12 @@ void RpcEndpointInfos::Clear() {
         application_->clear();
       }
     }
+    buildnumber_ = 0u;
+    if (has_versionqualifier()) {
+      if (versionqualifier_ != &::google::protobuf::internal::kEmptyString) {
+        versionqualifier_->clear();
+      }
+    }
   }
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
   mutable_unknown_fields()->Clear();
@@ -1780,6 +1798,39 @@ bool RpcEndpointInfos::MergePartialFromCodedStream(
         } else {
           goto handle_uninterpreted;
         }
+        if (input->ExpectTag(56)) goto parse_buildNumber;
+        break;
+      }
+
+      // optional uint32 buildNumber = 7;
+      case 7: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_buildNumber:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::uint32, ::google::protobuf::internal::WireFormatLite::TYPE_UINT32>(
+                 input, &buildnumber_)));
+          set_has_buildnumber();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(66)) goto parse_versionQualifier;
+        break;
+      }
+
+      // optional string versionQualifier = 8;
+      case 8: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_versionQualifier:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+                input, this->mutable_versionqualifier()));
+          ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+            this->versionqualifier().data(), this->versionqualifier().length(),
+            ::google::protobuf::internal::WireFormat::PARSE);
+        } else {
+          goto handle_uninterpreted;
+        }
         if (input->ExpectAtEnd()) return true;
         break;
       }
@@ -1844,6 +1895,20 @@ void RpcEndpointInfos::SerializeWithCachedSizes(
       6, this->application(), output);
   }
 
+  // optional uint32 buildNumber = 7;
+  if (has_buildnumber()) {
+    ::google::protobuf::internal::WireFormatLite::WriteUInt32(7, this->buildnumber(), output);
+  }
+
+  // optional string versionQualifier = 8;
+  if (has_versionqualifier()) {
+    ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+      this->versionqualifier().data(), this->versionqualifier().length(),
+      ::google::protobuf::internal::WireFormat::SERIALIZE);
+    ::google::protobuf::internal::WireFormatLite::WriteString(
+      8, this->versionqualifier(), output);
+  }
+
   if (!unknown_fields().empty()) {
     ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
         unknown_fields(), output);
@@ -1897,6 +1962,21 @@ void RpcEndpointInfos::SerializeWithCachedSizes(
         6, this->application(), target);
   }
 
+  // optional uint32 buildNumber = 7;
+  if (has_buildnumber()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteUInt32ToArray(7, this->buildnumber(), target);
+  }
+
+  // optional string versionQualifier = 8;
+  if (has_versionqualifier()) {
+    ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+      this->versionqualifier().data(), this->versionqualifier().length(),
+      ::google::protobuf::internal::WireFormat::SERIALIZE);
+    target =
+      ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+        8, this->versionqualifier(), target);
+  }
+
   if (!unknown_fields().empty()) {
     target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
         unknown_fields(), target);
@@ -1950,6 +2030,20 @@ int RpcEndpointInfos::ByteSize() const {
           this->application());
     }
 
+    // optional uint32 buildNumber = 7;
+    if (has_buildnumber()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::UInt32Size(
+          this->buildnumber());
+    }
+
+    // optional string versionQualifier = 8;
+    if (has_versionqualifier()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::StringSize(
+          this->versionqualifier());
+    }
+
   }
   if (!unknown_fields().empty()) {
     total_size +=
@@ -1995,6 +2089,12 @@ void RpcEndpointInfos::MergeFrom(const RpcEndpointInfos& from) {
     if (from.has_application()) {
       set_application(from.application());
     }
+    if (from.has_buildnumber()) {
+      set_buildnumber(from.buildnumber());
+    }
+    if (from.has_versionqualifier()) {
+      set_versionqualifier(from.versionqualifier());
+    }
   }
   mutable_unknown_fields()->MergeFrom(from.unknown_fields());
 }
@@ -2024,6 +2124,8 @@ void RpcEndpointInfos::Swap(RpcEndpointInfos* other) {
     std::swap(minorversion_, other->minorversion_);
     std::swap(patchversion_, other->patchversion_);
     std::swap(application_, other->application_);
+    std::swap(buildnumber_, other->buildnumber_);
+    std::swap(versionqualifier_, other->versionqualifier_);
     std::swap(_has_bits_[0], other->_has_bits_[0]);
     _unknown_fields_.Swap(&other->_unknown_fields_);
     std::swap(_cached_size_, other->_cached_size_);
@@ -3514,6 +3616,7 @@ const int BitToUserHandshake::kErrorIdFieldNumber;
 const int BitToUserHandshake::kErrorMessageFieldNumber;
 const int BitToUserHandshake::kServerInfosFieldNumber;
 const int BitToUserHandshake::kAuthenticationMechanismsFieldNumber;
+const int BitToUserHandshake::kSupportedMethodsFieldNumber;
 #endif  // !_MSC_VER
 
 BitToUserHandshake::BitToUserHandshake()
@@ -3597,6 +3700,7 @@ void BitToUserHandshake::Clear() {
     }
   }
   authenticationmechanisms_.Clear();
+  supported_methods_.Clear();
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
   mutable_unknown_fields()->Clear();
 }
@@ -3706,6 +3810,35 @@ bool BitToUserHandshake::MergePartialFromCodedStream(
           goto handle_uninterpreted;
         }
         if (input->ExpectTag(58)) goto parse_authenticationMechanisms;
+        if (input->ExpectTag(64)) goto parse_supported_methods;
+        break;
+      }
+
+      // repeated .exec.user.RpcType supported_methods = 8;
+      case 8: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_supported_methods:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::user::RpcType_IsValid(value)) {
+            add_supported_methods(static_cast< ::exec::user::RpcType >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(8, value);
+          }
+        } else if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag)
+                   == ::google::protobuf::internal::WireFormatLite::
+                      WIRETYPE_LENGTH_DELIMITED) {
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPackedEnumNoInline(
+                 input,
+                 &::exec::user::RpcType_IsValid,
+                 this->mutable_supported_methods())));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(64)) goto parse_supported_methods;
         if (input->ExpectAtEnd()) return true;
         break;
       }
@@ -3772,6 +3905,12 @@ void BitToUserHandshake::SerializeWithCachedSizes(
       7, this->authenticationmechanisms(i), output);
   }
 
+  // repeated .exec.user.RpcType supported_methods = 8;
+  for (int i = 0; i < this->supported_methods_size(); i++) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      8, this->supported_methods(i), output);
+  }
+
   if (!unknown_fields().empty()) {
     ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
         unknown_fields(), output);
@@ -3827,6 +3966,12 @@ void BitToUserHandshake::SerializeWithCachedSizes(
       WriteStringToArray(7, this->authenticationmechanisms(i), target);
   }
 
+  // repeated .exec.user.RpcType supported_methods = 8;
+  for (int i = 0; i < this->supported_methods_size(); i++) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      8, this->supported_methods(i), target);
+  }
+
   if (!unknown_fields().empty()) {
     target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
         unknown_fields(), target);
@@ -3880,6 +4025,16 @@ int BitToUserHandshake::ByteSize() const {
       this->authenticationmechanisms(i));
   }
 
+  // repeated .exec.user.RpcType supported_methods = 8;
+  {
+    int data_size = 0;
+    for (int i = 0; i < this->supported_methods_size(); i++) {
+      data_size += ::google::protobuf::internal::WireFormatLite::EnumSize(
+        this->supported_methods(i));
+    }
+    total_size += 1 * this->supported_methods_size() + data_size;
+  }
+
   if (!unknown_fields().empty()) {
     total_size +=
       ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
@@ -3906,6 +4061,7 @@ void BitToUserHandshake::MergeFrom(const ::google::protobuf::Message& from) {
 void BitToUserHandshake::MergeFrom(const BitToUserHandshake& from) {
   GOOGLE_CHECK_NE(&from, this);
   authenticationmechanisms_.MergeFrom(from.authenticationmechanisms_);
+  supported_methods_.MergeFrom(from.supported_methods_);
   if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
     if (from.has_rpc_version()) {
       set_rpc_version(from.rpc_version());
@@ -3951,6 +4107,7 @@ void BitToUserHandshake::Swap(BitToUserHandshake* other) {
     std::swap(errormessage_, other->errormessage_);
     std::swap(server_infos_, other->server_infos_);
     authenticationmechanisms_.Swap(&other->authenticationmechanisms_);
+    supported_methods_.Swap(&other->supported_methods_);
     std::swap(_has_bits_[0], other->_has_bits_[0]);
     _unknown_fields_.Swap(&other->_unknown_fields_);
     std::swap(_cached_size_, other->_cached_size_);

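The regenerated message now carries the server's supported RPC methods in
the handshake. On the Java side (UserProtos.java in the diffstat is
regenerated from the same User.proto), the repeated enum surfaces through
protobuf's usual generated accessors; a sketch of how a client could
consume it (the accessor name follows protobuf's generated-code naming
convention for repeated fields and is assumed here, not quoted):

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.drill.exec.proto.UserProtos;

    // Sketch only: collect the methods the server advertises in the
    // handshake; anything absent gets the client-side fallback.
    static Set<UserProtos.RpcType> supportedMethods(
        UserProtos.BitToUserHandshake handshake) {
      return new HashSet<>(handshake.getSupportedMethodsList());
    }
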
http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/contrib/native/client/src/protobuf/User.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/User.pb.h b/contrib/native/client/src/protobuf/User.pb.h
index bd050ee..d332c36 100644
--- a/contrib/native/client/src/protobuf/User.pb.h
+++ b/contrib/native/client/src/protobuf/User.pb.h
@@ -532,6 +532,25 @@ class RpcEndpointInfos : public ::google::protobuf::Message {
   inline ::std::string* release_application();
   inline void set_allocated_application(::std::string* application);
 
+  // optional uint32 buildNumber = 7;
+  inline bool has_buildnumber() const;
+  inline void clear_buildnumber();
+  static const int kBuildNumberFieldNumber = 7;
+  inline ::google::protobuf::uint32 buildnumber() const;
+  inline void set_buildnumber(::google::protobuf::uint32 value);
+
+  // optional string versionQualifier = 8;
+  inline bool has_versionqualifier() const;
+  inline void clear_versionqualifier();
+  static const int kVersionQualifierFieldNumber = 8;
+  inline const ::std::string& versionqualifier() const;
+  inline void set_versionqualifier(const ::std::string& value);
+  inline void set_versionqualifier(const char* value);
+  inline void set_versionqualifier(const char* value, size_t size);
+  inline ::std::string* mutable_versionqualifier();
+  inline ::std::string* release_versionqualifier();
+  inline void set_allocated_versionqualifier(::std::string* versionqualifier);
+
   // @@protoc_insertion_point(class_scope:exec.user.RpcEndpointInfos)
  private:
   inline void set_has_name();
@@ -546,6 +565,10 @@ class RpcEndpointInfos : public ::google::protobuf::Message {
   inline void clear_has_patchversion();
   inline void set_has_application();
   inline void clear_has_application();
+  inline void set_has_buildnumber();
+  inline void clear_has_buildnumber();
+  inline void set_has_versionqualifier();
+  inline void clear_has_versionqualifier();
 
   ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
@@ -555,9 +578,11 @@ class RpcEndpointInfos : public ::google::protobuf::Message {
   ::google::protobuf::uint32 minorversion_;
   ::std::string* application_;
   ::google::protobuf::uint32 patchversion_;
+  ::google::protobuf::uint32 buildnumber_;
+  ::std::string* versionqualifier_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(6 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(8 + 31) / 32];
 
   friend void  protobuf_AddDesc_User_2eproto();
   friend void protobuf_AssignDesc_User_2eproto();
@@ -1173,6 +1198,16 @@ class BitToUserHandshake : public ::google::protobuf::Message {
   inline const ::google::protobuf::RepeatedPtrField< ::std::string>& authenticationmechanisms() const;
   inline ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_authenticationmechanisms();
 
+  // repeated .exec.user.RpcType supported_methods = 8;
+  inline int supported_methods_size() const;
+  inline void clear_supported_methods();
+  static const int kSupportedMethodsFieldNumber = 8;
+  inline ::exec::user::RpcType supported_methods(int index) const;
+  inline void set_supported_methods(int index, ::exec::user::RpcType value);
+  inline void add_supported_methods(::exec::user::RpcType value);
+  inline const ::google::protobuf::RepeatedField<int>& supported_methods() const;
+  inline ::google::protobuf::RepeatedField<int>* mutable_supported_methods();
+
   // @@protoc_insertion_point(class_scope:exec.user.BitToUserHandshake)
  private:
   inline void set_has_rpc_version();
@@ -1194,9 +1229,10 @@ class BitToUserHandshake : public ::google::protobuf::Message {
   ::std::string* errormessage_;
   ::exec::user::RpcEndpointInfos* server_infos_;
   ::google::protobuf::RepeatedPtrField< ::std::string> authenticationmechanisms_;
+  ::google::protobuf::RepeatedField<int> supported_methods_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(6 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(7 + 31) / 32];
 
   friend void  protobuf_AddDesc_User_2eproto();
   friend void protobuf_AssignDesc_User_2eproto();
@@ -4095,6 +4131,98 @@ inline void RpcEndpointInfos::set_allocated_application(::std::string* applicati
   }
 }
 
+// optional uint32 buildNumber = 7;
+inline bool RpcEndpointInfos::has_buildnumber() const {
+  return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void RpcEndpointInfos::set_has_buildnumber() {
+  _has_bits_[0] |= 0x00000040u;
+}
+inline void RpcEndpointInfos::clear_has_buildnumber() {
+  _has_bits_[0] &= ~0x00000040u;
+}
+inline void RpcEndpointInfos::clear_buildnumber() {
+  buildnumber_ = 0u;
+  clear_has_buildnumber();
+}
+inline ::google::protobuf::uint32 RpcEndpointInfos::buildnumber() const {
+  return buildnumber_;
+}
+inline void RpcEndpointInfos::set_buildnumber(::google::protobuf::uint32 value) {
+  set_has_buildnumber();
+  buildnumber_ = value;
+}
+
+// optional string versionQualifier = 8;
+inline bool RpcEndpointInfos::has_versionqualifier() const {
+  return (_has_bits_[0] & 0x00000080u) != 0;
+}
+inline void RpcEndpointInfos::set_has_versionqualifier() {
+  _has_bits_[0] |= 0x00000080u;
+}
+inline void RpcEndpointInfos::clear_has_versionqualifier() {
+  _has_bits_[0] &= ~0x00000080u;
+}
+inline void RpcEndpointInfos::clear_versionqualifier() {
+  if (versionqualifier_ != &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_->clear();
+  }
+  clear_has_versionqualifier();
+}
+inline const ::std::string& RpcEndpointInfos::versionqualifier() const {
+  return *versionqualifier_;
+}
+inline void RpcEndpointInfos::set_versionqualifier(const ::std::string& value) {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  versionqualifier_->assign(value);
+}
+inline void RpcEndpointInfos::set_versionqualifier(const char* value) {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  versionqualifier_->assign(value);
+}
+inline void RpcEndpointInfos::set_versionqualifier(const char* value, size_t size) {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  versionqualifier_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* RpcEndpointInfos::mutable_versionqualifier() {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  return versionqualifier_;
+}
+inline ::std::string* RpcEndpointInfos::release_versionqualifier() {
+  clear_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = versionqualifier_;
+    versionqualifier_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void RpcEndpointInfos::set_allocated_versionqualifier(::std::string* versionqualifier) {
+  if (versionqualifier_ != &::google::protobuf::internal::kEmptyString) {
+    delete versionqualifier_;
+  }
+  if (versionqualifier) {
+    set_has_versionqualifier();
+    versionqualifier_ = versionqualifier;
+  } else {
+    clear_has_versionqualifier();
+    versionqualifier_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
 // -------------------------------------------------------------------
 
 // UserToBitHandshake
@@ -4929,6 +5057,33 @@ BitToUserHandshake::mutable_authenticationmechanisms() {
   return &authenticationmechanisms_;
 }
 
+// repeated .exec.user.RpcType supported_methods = 8;
+inline int BitToUserHandshake::supported_methods_size() const {
+  return supported_methods_.size();
+}
+inline void BitToUserHandshake::clear_supported_methods() {
+  supported_methods_.Clear();
+}
+inline ::exec::user::RpcType BitToUserHandshake::supported_methods(int index) const {
+  return static_cast< ::exec::user::RpcType >(supported_methods_.Get(index));
+}
+inline void BitToUserHandshake::set_supported_methods(int index, ::exec::user::RpcType value) {
+  assert(::exec::user::RpcType_IsValid(value));
+  supported_methods_.Set(index, value);
+}
+inline void BitToUserHandshake::add_supported_methods(::exec::user::RpcType value) {
+  assert(::exec::user::RpcType_IsValid(value));
+  supported_methods_.Add(value);
+}
+inline const ::google::protobuf::RepeatedField<int>&
+BitToUserHandshake::supported_methods() const {
+  return supported_methods_;
+}
+inline ::google::protobuf::RepeatedField<int>*
+BitToUserHandshake::mutable_supported_methods() {
+  return &supported_methods_;
+}
+
 // -------------------------------------------------------------------
 
 // LikeFilter

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/contrib/native/client/src/protobuf/UserBitShared.pb.cc
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.cc b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
index 2fcf116..0745228 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.cc
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
@@ -339,7 +339,7 @@ void protobuf_AssignDesc_UserBitShared_2eproto() {
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(QueryInfo));
   QueryProfile_descriptor_ = file->message_type(13);
-  static const int QueryProfile_offsets_[17] = {
+  static const int QueryProfile_offsets_[19] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, id_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, type_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, start_),
@@ -357,6 +357,8 @@ void protobuf_AssignDesc_UserBitShared_2eproto() {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, error_id_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, error_node_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, options_json_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, planend_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, queuewaitend_),
   };
   QueryProfile_reflection_ =
     new ::google::protobuf::internal::GeneratedMessageReflection(
@@ -690,7 +692,7 @@ void protobuf_AddDesc_UserBitShared_2eproto() {
     "rt\030\002 \001(\003\0222\n\005state\030\003 \001(\0162#.exec.shared.Qu"
     "eryResult.QueryState\022\017\n\004user\030\004 \001(\t:\001-\022\'\n"
     "\007foreman\030\005 \001(\0132\026.exec.DrillbitEndpoint\022\024"
-    "\n\014options_json\030\006 \001(\t\"\320\003\n\014QueryProfile\022 \n"
+    "\n\014options_json\030\006 \001(\t\"\367\003\n\014QueryProfile\022 \n"
     "\002id\030\001 \001(\0132\024.exec.shared.QueryId\022$\n\004type\030"
     "\002 \001(\0162\026.exec.shared.QueryType\022\r\n\005start\030\003"
     " \001(\003\022\013\n\003end\030\004 \001(\003\022\r\n\005query\030\005 \001(\t\022\014\n\004plan"
@@ -702,63 +704,63 @@ void protobuf_AddDesc_UserBitShared_2eproto() {
     "agmentProfile\022\017\n\004user\030\014 \001(\t:\001-\022\r\n\005error\030"
     "\r \001(\t\022\024\n\014verboseError\030\016 \001(\t\022\020\n\010error_id\030"
     "\017 \001(\t\022\022\n\nerror_node\030\020 \001(\t\022\024\n\014options_jso"
-    "n\030\021 \001(\t\"t\n\024MajorFragmentProfile\022\031\n\021major"
-    "_fragment_id\030\001 \001(\005\022A\n\026minor_fragment_pro"
-    "file\030\002 \003(\0132!.exec.shared.MinorFragmentPr"
-    "ofile\"\350\002\n\024MinorFragmentProfile\022)\n\005state\030"
-    "\001 \001(\0162\032.exec.shared.FragmentState\022(\n\005err"
-    "or\030\002 \001(\0132\031.exec.shared.DrillPBError\022\031\n\021m"
-    "inor_fragment_id\030\003 \001(\005\0226\n\020operator_profi"
-    "le\030\004 \003(\0132\034.exec.shared.OperatorProfile\022\022"
-    "\n\nstart_time\030\005 \001(\003\022\020\n\010end_time\030\006 \001(\003\022\023\n\013"
-    "memory_used\030\007 \001(\003\022\027\n\017max_memory_used\030\010 \001"
-    "(\003\022(\n\010endpoint\030\t \001(\0132\026.exec.DrillbitEndp"
-    "oint\022\023\n\013last_update\030\n \001(\003\022\025\n\rlast_progre"
-    "ss\030\013 \001(\003\"\377\001\n\017OperatorProfile\0221\n\rinput_pr"
-    "ofile\030\001 \003(\0132\032.exec.shared.StreamProfile\022"
-    "\023\n\013operator_id\030\003 \001(\005\022\025\n\roperator_type\030\004 "
-    "\001(\005\022\023\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess_nano"
-    "s\030\006 \001(\003\022#\n\033peak_local_memory_allocated\030\007"
-    " \001(\003\022(\n\006metric\030\010 \003(\0132\030.exec.shared.Metri"
-    "cValue\022\022\n\nwait_nanos\030\t \001(\003\"B\n\rStreamProf"
-    "ile\022\017\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 \001(\003\022\017\n"
-    "\007schemas\030\003 \001(\003\"J\n\013MetricValue\022\021\n\tmetric_"
-    "id\030\001 \001(\005\022\022\n\nlong_value\030\002 \001(\003\022\024\n\014double_v"
-    "alue\030\003 \001(\001\")\n\010Registry\022\035\n\003jar\030\001 \003(\0132\020.ex"
-    "ec.shared.Jar\"/\n\003Jar\022\014\n\004name\030\001 \001(\t\022\032\n\022fu"
-    "nction_signature\030\002 \003(\t\"W\n\013SaslMessage\022\021\n"
-    "\tmechanism\030\001 \001(\t\022\014\n\004data\030\002 \001(\014\022\'\n\006status"
-    "\030\003 \001(\0162\027.exec.shared.SaslStatus*5\n\nRpcCh"
-    "annel\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004"
-    "USER\020\002*V\n\tQueryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020"
-    "\002\022\014\n\010PHYSICAL\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPAR"
-    "ED_STATEMENT\020\005*\207\001\n\rFragmentState\022\013\n\007SEND"
-    "ING\020\000\022\027\n\023AWAITING_ALLOCATION\020\001\022\013\n\007RUNNIN"
-    "G\020\002\022\014\n\010FINISHED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAIL"
-    "ED\020\005\022\032\n\026CANCELLATION_REQUESTED\020\006*\335\005\n\020Cor"
-    "eOperatorType\022\021\n\rSINGLE_SENDER\020\000\022\024\n\020BROA"
-    "DCAST_SENDER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_AGGRE"
-    "GATE\020\003\022\r\n\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN\020\005\022\031\n"
-    "\025HASH_PARTITION_SENDER\020\006\022\t\n\005LIMIT\020\007\022\024\n\020M"
-    "ERGING_RECEIVER\020\010\022\034\n\030ORDERED_PARTITION_S"
-    "ENDER\020\t\022\013\n\007PROJECT\020\n\022\026\n\022UNORDERED_RECEIV"
-    "ER\020\013\022\020\n\014RANGE_SENDER\020\014\022\n\n\006SCREEN\020\r\022\034\n\030SE"
-    "LECTION_VECTOR_REMOVER\020\016\022\027\n\023STREAMING_AG"
-    "GREGATE\020\017\022\016\n\nTOP_N_SORT\020\020\022\021\n\rEXTERNAL_SO"
-    "RT\020\021\022\t\n\005TRACE\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_SORT\020\024"
-    "\022\032\n\026PARQUET_ROW_GROUP_SCAN\020\025\022\021\n\rHIVE_SUB"
-    "_SCAN\020\026\022\025\n\021SYSTEM_TABLE_SCAN\020\027\022\021\n\rMOCK_S"
-    "UB_SCAN\020\030\022\022\n\016PARQUET_WRITER\020\031\022\023\n\017DIRECT_"
-    "SUB_SCAN\020\032\022\017\n\013TEXT_WRITER\020\033\022\021\n\rTEXT_SUB_"
-    "SCAN\020\034\022\021\n\rJSON_SUB_SCAN\020\035\022\030\n\024INFO_SCHEMA"
-    "_SUB_SCAN\020\036\022\023\n\017COMPLEX_TO_JSON\020\037\022\025\n\021PROD"
-    "UCER_CONSUMER\020 \022\022\n\016HBASE_SUB_SCAN\020!\022\n\n\006W"
-    "INDOW\020\"\022\024\n\020NESTED_LOOP_JOIN\020#\022\021\n\rAVRO_SU"
-    "B_SCAN\020$*g\n\nSaslStatus\022\020\n\014SASL_UNKNOWN\020\000"
-    "\022\016\n\nSASL_START\020\001\022\024\n\020SASL_IN_PROGRESS\020\002\022\020"
-    "\n\014SASL_SUCCESS\020\003\022\017\n\013SASL_FAILED\020\004B.\n\033org"
-    ".apache.drill.exec.protoB\rUserBitSharedH"
-    "\001", 4801);
+    "n\030\021 \001(\t\022\017\n\007planEnd\030\022 \001(\003\022\024\n\014queueWaitEnd"
+    "\030\023 \001(\003\"t\n\024MajorFragmentProfile\022\031\n\021major_"
+    "fragment_id\030\001 \001(\005\022A\n\026minor_fragment_prof"
+    "ile\030\002 \003(\0132!.exec.shared.MinorFragmentPro"
+    "file\"\350\002\n\024MinorFragmentProfile\022)\n\005state\030\001"
+    " \001(\0162\032.exec.shared.FragmentState\022(\n\005erro"
+    "r\030\002 \001(\0132\031.exec.shared.DrillPBError\022\031\n\021mi"
+    "nor_fragment_id\030\003 \001(\005\0226\n\020operator_profil"
+    "e\030\004 \003(\0132\034.exec.shared.OperatorProfile\022\022\n"
+    "\nstart_time\030\005 \001(\003\022\020\n\010end_time\030\006 \001(\003\022\023\n\013m"
+    "emory_used\030\007 \001(\003\022\027\n\017max_memory_used\030\010 \001("
+    "\003\022(\n\010endpoint\030\t \001(\0132\026.exec.DrillbitEndpo"
+    "int\022\023\n\013last_update\030\n \001(\003\022\025\n\rlast_progres"
+    "s\030\013 \001(\003\"\377\001\n\017OperatorProfile\0221\n\rinput_pro"
+    "file\030\001 \003(\0132\032.exec.shared.StreamProfile\022\023"
+    "\n\013operator_id\030\003 \001(\005\022\025\n\roperator_type\030\004 \001"
+    "(\005\022\023\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess_nanos"
+    "\030\006 \001(\003\022#\n\033peak_local_memory_allocated\030\007 "
+    "\001(\003\022(\n\006metric\030\010 \003(\0132\030.exec.shared.Metric"
+    "Value\022\022\n\nwait_nanos\030\t \001(\003\"B\n\rStreamProfi"
+    "le\022\017\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 \001(\003\022\017\n\007"
+    "schemas\030\003 \001(\003\"J\n\013MetricValue\022\021\n\tmetric_i"
+    "d\030\001 \001(\005\022\022\n\nlong_value\030\002 \001(\003\022\024\n\014double_va"
+    "lue\030\003 \001(\001\")\n\010Registry\022\035\n\003jar\030\001 \003(\0132\020.exe"
+    "c.shared.Jar\"/\n\003Jar\022\014\n\004name\030\001 \001(\t\022\032\n\022fun"
+    "ction_signature\030\002 \003(\t\"W\n\013SaslMessage\022\021\n\t"
+    "mechanism\030\001 \001(\t\022\014\n\004data\030\002 \001(\014\022\'\n\006status\030"
+    "\003 \001(\0162\027.exec.shared.SaslStatus*5\n\nRpcCha"
+    "nnel\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004U"
+    "SER\020\002*V\n\tQueryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020\002"
+    "\022\014\n\010PHYSICAL\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPARE"
+    "D_STATEMENT\020\005*\207\001\n\rFragmentState\022\013\n\007SENDI"
+    "NG\020\000\022\027\n\023AWAITING_ALLOCATION\020\001\022\013\n\007RUNNING"
+    "\020\002\022\014\n\010FINISHED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILE"
+    "D\020\005\022\032\n\026CANCELLATION_REQUESTED\020\006*\335\005\n\020Core"
+    "OperatorType\022\021\n\rSINGLE_SENDER\020\000\022\024\n\020BROAD"
+    "CAST_SENDER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_AGGREG"
+    "ATE\020\003\022\r\n\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN\020\005\022\031\n\025"
+    "HASH_PARTITION_SENDER\020\006\022\t\n\005LIMIT\020\007\022\024\n\020ME"
+    "RGING_RECEIVER\020\010\022\034\n\030ORDERED_PARTITION_SE"
+    "NDER\020\t\022\013\n\007PROJECT\020\n\022\026\n\022UNORDERED_RECEIVE"
+    "R\020\013\022\020\n\014RANGE_SENDER\020\014\022\n\n\006SCREEN\020\r\022\034\n\030SEL"
+    "ECTION_VECTOR_REMOVER\020\016\022\027\n\023STREAMING_AGG"
+    "REGATE\020\017\022\016\n\nTOP_N_SORT\020\020\022\021\n\rEXTERNAL_SOR"
+    "T\020\021\022\t\n\005TRACE\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_SORT\020\024\022"
+    "\032\n\026PARQUET_ROW_GROUP_SCAN\020\025\022\021\n\rHIVE_SUB_"
+    "SCAN\020\026\022\025\n\021SYSTEM_TABLE_SCAN\020\027\022\021\n\rMOCK_SU"
+    "B_SCAN\020\030\022\022\n\016PARQUET_WRITER\020\031\022\023\n\017DIRECT_S"
+    "UB_SCAN\020\032\022\017\n\013TEXT_WRITER\020\033\022\021\n\rTEXT_SUB_S"
+    "CAN\020\034\022\021\n\rJSON_SUB_SCAN\020\035\022\030\n\024INFO_SCHEMA_"
+    "SUB_SCAN\020\036\022\023\n\017COMPLEX_TO_JSON\020\037\022\025\n\021PRODU"
+    "CER_CONSUMER\020 \022\022\n\016HBASE_SUB_SCAN\020!\022\n\n\006WI"
+    "NDOW\020\"\022\024\n\020NESTED_LOOP_JOIN\020#\022\021\n\rAVRO_SUB"
+    "_SCAN\020$*g\n\nSaslStatus\022\020\n\014SASL_UNKNOWN\020\000\022"
+    "\016\n\nSASL_START\020\001\022\024\n\020SASL_IN_PROGRESS\020\002\022\020\n"
+    "\014SASL_SUCCESS\020\003\022\017\n\013SASL_FAILED\020\004B.\n\033org."
+    "apache.drill.exec.protoB\rUserBitSharedH\001", 4840);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "UserBitShared.proto", &protobuf_RegisterTypes);
   UserCredentials::default_instance_ = new UserCredentials();
@@ -5423,6 +5425,8 @@ const int QueryProfile::kVerboseErrorFieldNumber;
 const int QueryProfile::kErrorIdFieldNumber;
 const int QueryProfile::kErrorNodeFieldNumber;
 const int QueryProfile::kOptionsJsonFieldNumber;
+const int QueryProfile::kPlanEndFieldNumber;
+const int QueryProfile::kQueueWaitEndFieldNumber;
 #endif  // !_MSC_VER
 
 QueryProfile::QueryProfile()
@@ -5459,6 +5463,8 @@ void QueryProfile::SharedCtor() {
   error_id_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
   error_node_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
   options_json_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  planend_ = GOOGLE_LONGLONG(0);
+  queuewaitend_ = GOOGLE_LONGLONG(0);
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
 }
 
@@ -5576,6 +5582,8 @@ void QueryProfile::Clear() {
         options_json_->clear();
       }
     }
+    planend_ = GOOGLE_LONGLONG(0);
+    queuewaitend_ = GOOGLE_LONGLONG(0);
   }
   fragment_profile_.Clear();
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
@@ -5868,6 +5876,38 @@ bool QueryProfile::MergePartialFromCodedStream(
         } else {
           goto handle_uninterpreted;
         }
+        if (input->ExpectTag(144)) goto parse_planEnd;
+        break;
+      }
+
+      // optional int64 planEnd = 18;
+      case 18: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_planEnd:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &planend_)));
+          set_has_planend();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(152)) goto parse_queueWaitEnd;
+        break;
+      }
+
+      // optional int64 queueWaitEnd = 19;
+      case 19: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_queueWaitEnd:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &queuewaitend_)));
+          set_has_queuewaitend();
+        } else {
+          goto handle_uninterpreted;
+        }
         if (input->ExpectAtEnd()) return true;
         break;
       }
@@ -6012,6 +6052,16 @@ void QueryProfile::SerializeWithCachedSizes(
       17, this->options_json(), output);
   }
 
+  // optional int64 planEnd = 18;
+  if (has_planend()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(18, this->planend(), output);
+  }
+
+  // optional int64 queueWaitEnd = 19;
+  if (has_queuewaitend()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(19, this->queuewaitend(), output);
+  }
+
   if (!unknown_fields().empty()) {
     ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
         unknown_fields(), output);
@@ -6153,6 +6203,16 @@ void QueryProfile::SerializeWithCachedSizes(
         17, this->options_json(), target);
   }
 
+  // optional int64 planEnd = 18;
+  if (has_planend()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(18, this->planend(), target);
+  }
+
+  // optional int64 queueWaitEnd = 19;
+  if (has_queuewaitend()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(19, this->queuewaitend(), target);
+  }
+
   if (!unknown_fields().empty()) {
     target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
         unknown_fields(), target);
@@ -6278,6 +6338,20 @@ int QueryProfile::ByteSize() const {
           this->options_json());
     }
 
+    // optional int64 planEnd = 18;
+    if (has_planend()) {
+      total_size += 2 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->planend());
+    }
+
+    // optional int64 queueWaitEnd = 19;
+    if (has_queuewaitend()) {
+      total_size += 2 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->queuewaitend());
+    }
+
   }
   // repeated .exec.shared.MajorFragmentProfile fragment_profile = 11;
   total_size += 1 * this->fragment_profile_size();
@@ -6366,6 +6440,12 @@ void QueryProfile::MergeFrom(const QueryProfile& from) {
     if (from.has_options_json()) {
       set_options_json(from.options_json());
     }
+    if (from.has_planend()) {
+      set_planend(from.planend());
+    }
+    if (from.has_queuewaitend()) {
+      set_queuewaitend(from.queuewaitend());
+    }
   }
   mutable_unknown_fields()->MergeFrom(from.unknown_fields());
 }
@@ -6406,6 +6486,8 @@ void QueryProfile::Swap(QueryProfile* other) {
     std::swap(error_id_, other->error_id_);
     std::swap(error_node_, other->error_node_);
     std::swap(options_json_, other->options_json_);
+    std::swap(planend_, other->planend_);
+    std::swap(queuewaitend_, other->queuewaitend_);
     std::swap(_has_bits_[0], other->_has_bits_[0]);
     _unknown_fields_.Swap(&other->_unknown_fields_);
     std::swap(_cached_size_, other->_cached_size_);

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/contrib/native/client/src/protobuf/UserBitShared.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.h b/contrib/native/client/src/protobuf/UserBitShared.pb.h
index a2d0607..ad32959 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.h
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.h
@@ -2096,6 +2096,20 @@ class QueryProfile : public ::google::protobuf::Message {
   inline ::std::string* release_options_json();
   inline void set_allocated_options_json(::std::string* options_json);
 
+  // optional int64 planEnd = 18;
+  inline bool has_planend() const;
+  inline void clear_planend();
+  static const int kPlanEndFieldNumber = 18;
+  inline ::google::protobuf::int64 planend() const;
+  inline void set_planend(::google::protobuf::int64 value);
+
+  // optional int64 queueWaitEnd = 19;
+  inline bool has_queuewaitend() const;
+  inline void clear_queuewaitend();
+  static const int kQueueWaitEndFieldNumber = 19;
+  inline ::google::protobuf::int64 queuewaitend() const;
+  inline void set_queuewaitend(::google::protobuf::int64 value);
+
   // @@protoc_insertion_point(class_scope:exec.shared.QueryProfile)
  private:
   inline void set_has_id();
@@ -2130,6 +2144,10 @@ class QueryProfile : public ::google::protobuf::Message {
   inline void clear_has_error_node();
   inline void set_has_options_json();
   inline void clear_has_options_json();
+  inline void set_has_planend();
+  inline void clear_has_planend();
+  inline void set_has_queuewaitend();
+  inline void clear_has_queuewaitend();
 
   ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
@@ -2151,9 +2169,11 @@ class QueryProfile : public ::google::protobuf::Message {
   ::std::string* error_id_;
   ::std::string* error_node_;
   ::std::string* options_json_;
+  ::google::protobuf::int64 planend_;
+  ::google::protobuf::int64 queuewaitend_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(17 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(19 + 31) / 32];
 
   friend void  protobuf_AddDesc_UserBitShared_2eproto();
   friend void protobuf_AssignDesc_UserBitShared_2eproto();
@@ -5774,6 +5794,50 @@ inline void QueryProfile::set_allocated_options_json(::std::string* options_json
   }
 }
 
+// optional int64 planEnd = 18;
+inline bool QueryProfile::has_planend() const {
+  return (_has_bits_[0] & 0x00020000u) != 0;
+}
+inline void QueryProfile::set_has_planend() {
+  _has_bits_[0] |= 0x00020000u;
+}
+inline void QueryProfile::clear_has_planend() {
+  _has_bits_[0] &= ~0x00020000u;
+}
+inline void QueryProfile::clear_planend() {
+  planend_ = GOOGLE_LONGLONG(0);
+  clear_has_planend();
+}
+inline ::google::protobuf::int64 QueryProfile::planend() const {
+  return planend_;
+}
+inline void QueryProfile::set_planend(::google::protobuf::int64 value) {
+  set_has_planend();
+  planend_ = value;
+}
+
+// optional int64 queueWaitEnd = 19;
+inline bool QueryProfile::has_queuewaitend() const {
+  return (_has_bits_[0] & 0x00040000u) != 0;
+}
+inline void QueryProfile::set_has_queuewaitend() {
+  _has_bits_[0] |= 0x00040000u;
+}
+inline void QueryProfile::clear_has_queuewaitend() {
+  _has_bits_[0] &= ~0x00040000u;
+}
+inline void QueryProfile::clear_queuewaitend() {
+  queuewaitend_ = GOOGLE_LONGLONG(0);
+  clear_has_queuewaitend();
+}
+inline ::google::protobuf::int64 QueryProfile::queuewaitend() const {
+  return queuewaitend_;
+}
+inline void QueryProfile::set_queuewaitend(::google::protobuf::int64 value) {
+  set_has_queuewaitend();
+  queuewaitend_ = value;
+}
+
 // -------------------------------------------------------------------
 
 // MajorFragmentProfile

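As an aside for readers following the new planEnd/queueWaitEnd fields: the snippet below is a minimal sketch of how client code could turn them into phase durations. It assumes the matching Java-generated class (org.apache.drill.exec.proto.UserBitShared.QueryProfile) with the usual protobuf hasXxx()/getXxx() accessors, and that all timestamps are epoch milliseconds; the profile object itself comes from elsewhere.

    import org.apache.drill.exec.proto.UserBitShared.QueryProfile;

    // Sketch: derive planning and queue-wait durations from a query profile.
    // Both fields are optional, so presence must be checked first (this mirrors
    // the has_planend()/planend() accessor pairs in the generated C++ above).
    static void printPhaseTimings(QueryProfile profile) {
      if (profile.hasPlanEnd()) {
        System.out.println("planning: " + (profile.getPlanEnd() - profile.getStart()) + " ms");
      }
      if (profile.hasPlanEnd() && profile.hasQueueWaitEnd()) {
        System.out.println("queue wait: " + (profile.getQueueWaitEnd() - profile.getPlanEnd()) + " ms");
      }
    }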
http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/java-exec/src/main/codegen/templates/DrillVersionInfo.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/DrillVersionInfo.java b/exec/java-exec/src/main/codegen/templates/DrillVersionInfo.java
index 7b4b222..c7ff4c7 100644
--- a/exec/java-exec/src/main/codegen/templates/DrillVersionInfo.java
+++ b/exec/java-exec/src/main/codegen/templates/DrillVersionInfo.java
@@ -24,8 +24,7 @@
 
 package org.apache.drill.common.util;
 
-import java.text.MessageFormat;
-import java.text.ParseException;
+import org.apache.drill.common.Version;
 
 /*
  * This file is generated with Freemarker using the template src/main/codegen/templates/DrillVersionInfo.java
@@ -39,18 +38,24 @@ import java.text.ParseException;
  *
  */
 public class DrillVersionInfo {
-  private static final String VERSION = "${maven.project.version}";
-
-  private static final int MAJOR_VERSION = ${maven.project.artifact.selectedVersion.majorVersion};
-  private static final int MINOR_VERSION = ${maven.project.artifact.selectedVersion.minorVersion};
-  private static final int PATCH_VERSION = ${maven.project.artifact.selectedVersion.incrementalVersion};
+  /**
+   * The version extracted from Maven POM file at build time.
+   */
+  public static final Version VERSION = new Version(
+      "${maven.project.version}",
+      ${maven.project.artifact.selectedVersion.majorVersion},
+      ${maven.project.artifact.selectedVersion.minorVersion},
+      ${maven.project.artifact.selectedVersion.incrementalVersion},
+      ${maven.project.artifact.selectedVersion.buildNumber},
+      "${maven.project.artifact.selectedVersion.qualifier!}"
+  );
 
   /**
    * Get the Drill version from pom
    * @return the version number as x.y.z
    */
   public static String getVersion() {
-    return VERSION;
+    return VERSION.getVersion();
   }
 
   /**
@@ -58,7 +63,7 @@ public class DrillVersionInfo {
    *  @return x if assuming the version number is x.y.z
    */
   public static int getMajorVersion() {
-    return MAJOR_VERSION;
+    return VERSION.getMajorVersion();
   }
 
   /**
@@ -66,7 +71,7 @@ public class DrillVersionInfo {
    *  @return y if assuming the version number is x.y.z
    */
   public static int getMinorVersion() {
-    return MINOR_VERSION;
+    return VERSION.getMinorVersion();
   }
 
   /**
@@ -74,6 +79,23 @@ public class DrillVersionInfo {
    *  @return z if assuming the version number is x.y.z(-suffix)
    */
   public static int getPatchVersion() {
-    return PATCH_VERSION;
+    return VERSION.getPatchVersion();
+  }
+
+  /**
+   *  Get the Drill build number from pom
+   *  @return b if assuming the version number is x.y.z(.b)(-suffix)
+   */
+  public static int getBuildNumber() {
+    return VERSION.getBuildNumber();
+  }
+
+  /**
+   *  Get the Drill version qualifier from pom
+   *  @return suffix if assuming the version number is x.y.z(-suffix), or an empty string
+   */
+  public static String getQualifier() {
+    return VERSION.getQualifier();
   }
 }
+

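A usage note (not part of the commit): now that the version is exposed as a org.apache.drill.common.Version object rather than plain strings and ints, callers can compare versions directly. A minimal sketch, assuming Version is comparable (as its use in ServerMethod further down implies); the 1.8.0 cutoff is just an example:

    import org.apache.drill.common.Version;
    import org.apache.drill.common.util.DrillVersionInfo;

    // Gate a code path on the build's own version instead of parsing strings.
    Version cutoff = new Version("1.8.0", 1, 8, 0, 0, "");
    boolean atLeast180 = DrillVersionInfo.VERSION.compareTo(cutoff) >= 0;
    System.out.println(DrillVersionInfo.getVersion() + " >= 1.8.0: " + atLeast180);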
http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
index 508c722..0b5bf30 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DrillClient.java
@@ -23,15 +23,13 @@ import static com.google.common.base.Preconditions.checkState;
 import static org.apache.drill.exec.proto.UserProtos.QueryResultsMode.STREAM_FULL;
 import static org.apache.drill.exec.proto.UserProtos.RunQuery.newBuilder;
 
-import com.google.common.base.Strings;
-import io.netty.channel.EventLoopGroup;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 import java.util.Vector;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.SynchronousQueue;
@@ -39,8 +37,9 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.drill.common.DrillAutoCloseables;
-import org.apache.drill.common.config.DrillProperties;
+import org.apache.drill.common.Version;
 import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.config.DrillProperties;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.coord.ClusterCoordinator;
@@ -78,20 +77,23 @@ import org.apache.drill.exec.rpc.ChannelClosedException;
 import org.apache.drill.exec.rpc.ConnectionThrottle;
 import org.apache.drill.exec.rpc.DrillRpcFuture;
 import org.apache.drill.exec.rpc.NamedThreadFactory;
+import org.apache.drill.exec.rpc.NonTransientRpcException;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.exec.rpc.TransportCheck;
-import org.apache.drill.exec.rpc.NonTransientRpcException;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
 import org.apache.drill.exec.rpc.user.UserClient;
 import org.apache.drill.exec.rpc.user.UserResultsListener;
+import org.apache.drill.exec.rpc.user.UserRpcUtils;
 
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.ArrayNode;
 import com.google.common.annotations.VisibleForTesting;
-
+import com.google.common.base.Strings;
 import com.google.common.util.concurrent.SettableFuture;
 
+import io.netty.channel.EventLoopGroup;
+
 /**
  * Thin wrapper around a UserClient that handles connect/close and transforms
  * String into ByteBuf.
@@ -475,12 +477,48 @@ public class DrillClient implements Closeable, ConnectionThrottle {
    *
   * @return the server information, or null if not connected or if the server
    *         doesn't provide the information
+   * @deprecated use {@code DrillClient#getServerVersion()}
    */
+  @Deprecated
   public RpcEndpointInfos getServerInfos() {
     return client != null ? client.getServerInfos() : null;
   }
 
   /**
+   * Return the server name. Only available after connecting.
+   *
+   * The result might be null if the server doesn't provide the name
+   * information.
+   *
+   * @return the server name, or null if not connected or if the server
+   *         doesn't provide the name
+   */
+  public String getServerName() {
+    return (client != null && client.getServerInfos() != null) ? client.getServerInfos().getName() : null;
+  }
+
+  /**
+   * Return the server version. Only available after connecting.
+   *
+   * The result might be null if the server doesn't provide the version
+   * information.
+   *
+   * @return the server version, or null if not connected or if the server
+   *         doesn't provide the version
+   */
+  public Version getServerVersion() {
+    return (client != null && client.getServerInfos() != null) ? UserRpcUtils.getVersion(client.getServerInfos()) : null;
+  }
+  /**
+   * Returns the list of methods supported by the server based on its advertised information.
+   *
+   * @return an immutable set of capabilities
+   */
+  public Set<ServerMethod> getSupportedMethods() {
+    return client != null ? ServerMethod.getSupportedMethods(client.getSupportedMethods(), client.getServerInfos()) : null;
+  }
+
+  /**
    * Submits a string based query plan for execution and returns the result batches. Supported query types are:
    * <p><ul>
    *  <li>{@link QueryType#LOGICAL}

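To illustrate the new accessors, here is a hedged sketch of client-side capability detection; the connection Properties are a placeholder, and note these accessors return null before a successful connect:

    // Sketch: probe what the server offers before issuing metadata calls.
    DrillClient client = new DrillClient();
    client.connect(props);  // props: hypothetical connection Properties
    System.out.println("Connected to " + client.getServerName()
        + ", version " + client.getServerVersion());
    if (client.getSupportedMethods().contains(ServerMethod.GET_TABLES)) {
      // Safe to use the native GET_TABLES metadata API.
    } else {
      // Fall back to querying INFORMATION_SCHEMA with plain SQL.
    }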
http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java
new file mode 100644
index 0000000..5c6640d
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/ServerMethod.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.client;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.drill.common.Version;
+import org.apache.drill.exec.proto.UserProtos.RpcEndpointInfos;
+import org.apache.drill.exec.proto.UserProtos.RpcType;
+import org.apache.drill.exec.rpc.user.UserRpcUtils;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * An enumeration of server methods, and the version in which they were introduced.
+ *
+ * It allows new methods to be introduced without changing the protocol, with clients
+ * being able to gracefully handle cases where a method is not handled by the server.
+ */
+public enum ServerMethod {
+  /**
+   * Submitting a query
+   */
+  RUN_QUERY(RpcType.RUN_QUERY, Constants.DRILL_0_0_0),
+
+  /**
+   * Plan a query without executing it
+   */
+  PLAN_QUERY(RpcType.QUERY_PLAN_FRAGMENTS, Constants.DRILL_0_0_0),
+
+  /**
+   * Cancel an existing query
+   */
+  CANCEL_QUERY(RpcType.CANCEL_QUERY, Constants.DRILL_0_0_0),
+
+  /**
+   * Resume a query
+   */
+  RESUME_PAUSED_QUERY(RpcType.RESUME_PAUSED_QUERY, Constants.DRILL_0_0_0),
+
+  /**
+   * Prepare a query for deferred execution
+   */
+  PREPARED_STATEMENT(RpcType.CREATE_PREPARED_STATEMENT, Constants.DRILL_1_8_0),
+
+  /**
+   * Get catalog metadata
+   */
+  GET_CATALOGS(RpcType.GET_CATALOGS, Constants.DRILL_1_8_0),
+
+  /**
+   * Get schemas metadata
+   */
+  GET_SCHEMAS(RpcType.GET_SCHEMAS, Constants.DRILL_1_8_0),
+
+  /**
+   * Get tables metadata
+   */
+  GET_TABLES(RpcType.GET_TABLES, Constants.DRILL_1_8_0),
+
+  /**
+   * Get columns metadata
+   */
+  GET_COLUMNS(RpcType.GET_COLUMNS, Constants.DRILL_1_8_0);
+
+  private static class Constants {
+    private static final Version DRILL_0_0_0 = new Version("0.0.0", 0, 0, 0, 0, "");
+
+    private static final Version DRILL_1_8_0 = new Version("1.8.0", 1, 8, 0, 0, "");
+  }
+
+  private static final Map<RpcType, ServerMethod> REVERSE_MAPPING;
+  static {
+    ImmutableMap.Builder<RpcType, ServerMethod> builder = ImmutableMap.builder();
+    for(ServerMethod method: values()) {
+      builder.put(method.rpcType, method);
+    }
+    REVERSE_MAPPING = Maps.immutableEnumMap(builder.build());
+  }
+
+  private final RpcType rpcType;
+  private final Version minVersion;
+
+  private ServerMethod(RpcType rpcType, Version minVersion) {
+    this.rpcType = rpcType;
+    this.minVersion = minVersion;
+  }
+
+  public Version getMinVersion() {
+    return minVersion;
+  }
+
+  /**
+   * Returns the list of methods supported by the server based on its advertised information.
+   * @param supportedMethods the methods advertised by the server
+   * @param serverInfos the server information, used as a version-based fallback; may be null
+   * @return an immutable set of capabilities
+   */
+  static final Set<ServerMethod> getSupportedMethods(Iterable<RpcType> supportedMethods, RpcEndpointInfos serverInfos) {
+    ImmutableSet.Builder<ServerMethod> builder = ImmutableSet.builder();
+
+    for(RpcType supportedMethod: supportedMethods) {
+      ServerMethod method = REVERSE_MAPPING.get(supportedMethod);
+      if (method == null) {
+        // The server might have newer methods we don't know how to handle yet.
+        continue;
+      }
+      builder.add(method);
+    }
+
+    // Fallback to version detection to cover the gap between Drill 1.8.0 and Drill 1.10.0
+    if (serverInfos == null) {
+      return Sets.immutableEnumSet(builder.build());
+    }
+
+    Version serverVersion = UserRpcUtils.getVersion(serverInfos);
+    for(ServerMethod capability: ServerMethod.values()) {
+      if (serverVersion.compareTo(capability.getMinVersion()) >= 0) {
+        builder.add(capability);
+      }
+    }
+
+    return Sets.immutableEnumSet(builder.build());
+  }
+}

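The version fallback above can also be exercised directly through the public surface of the enum. A small sketch, assuming a server that predates the method advertisement (between 1.8.0 and 1.10.0):

    import org.apache.drill.common.Version;
    import org.apache.drill.exec.client.ServerMethod;

    // Decide support purely from the server version, as the fallback does.
    Version serverVersion = new Version("1.9.0", 1, 9, 0, 0, "");
    for (ServerMethod method : ServerMethod.values()) {
      boolean supported = serverVersion.compareTo(method.getMinVersion()) >= 0;
      System.out.println(method + ": " + supported);
    }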

[05/27] drill git commit: DRILL-4730: Update JDBC DatabaseMetaData implementation to use new Metadata APIs

Posted by jn...@apache.org.
DRILL-4730: Update JDBC DatabaseMetaData implementation to use new Metadata APIs

Update JDBC driver to use Metadata APIs instead of executing SQL queries

close #613


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/17f888d9
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/17f888d9
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/17f888d9

Branch: refs/heads/master
Commit: 17f888d9058be2be8953cb1ea5b37297b7d2fef3
Parents: 16aa081
Author: Laurent Goujon <la...@dremio.com>
Authored: Fri Nov 4 13:32:44 2016 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:31 2017 -0800

----------------------------------------------------------------------
 .../org/apache/drill/exec/ops/QueryContext.java |  10 +-
 .../drill/exec/ops/ViewExpansionContext.java    |  22 +-
 .../apache/drill/exec/store/SchemaConfig.java   |  10 +-
 .../drill/exec/store/SchemaTreeProvider.java    |  14 +-
 .../drill/exec/store/ischema/Records.java       |  12 +
 .../exec/work/metadata/MetadataProvider.java    |  39 +-
 .../drill/jdbc/DrillConnectionConfig.java       |   6 +-
 .../drill/jdbc/impl/DrillConnectionImpl.java    |   8 +
 .../jdbc/impl/DrillDatabaseMetaDataImpl.java    |  31 +-
 .../apache/drill/jdbc/impl/DrillMetaImpl.java   | 663 ++++++++++++++++++-
 .../drill/jdbc/impl/DrillResultSetImpl.java     |  24 +-
 .../apache/drill/jdbc/impl/WrappedAccessor.java | 448 +++++++++++++
 .../jdbc/DatabaseMetaDataGetColumnsTest.java    |  30 +-
 .../apache/drill/jdbc/DatabaseMetaDataTest.java |  28 +-
 .../LegacyDatabaseMetaDataGetColumnsTest.java   |  73 ++
 .../drill/jdbc/LegacyDatabaseMetaDataTest.java  |  39 ++
 .../drill/jdbc/LegacyPreparedStatementTest.java |   4 +-
 .../drill/jdbc/test/TestJdbcMetadata.java       |   7 +
 .../drill/jdbc/test/TestLegacyJdbcMetadata.java |  36 +
 19 files changed, 1424 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
index 4ee8a9d..264af29 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
@@ -17,10 +17,6 @@
  */
 package org.apache.drill.exec.ops;
 
-import com.google.common.base.Function;
-import com.google.common.collect.Maps;
-import io.netty.buffer.DrillBuf;
-
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -55,7 +51,11 @@ import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.testing.ExecutionControls;
 import org.apache.drill.exec.util.Utilities;
 
+import com.google.common.base.Function;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import io.netty.buffer.DrillBuf;
 
 // TODO - consider re-name to PlanningContext, as the query execution context actually appears
 // in fragment contexts
@@ -151,6 +151,7 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
    * @param userName User who owns the schema tree.
    * @return Root of the schema tree.
    */
+  @Override
   public SchemaPlus getRootSchema(final String userName) {
     return schemaTreeProvider.createRootSchema(userName, this);
   }
@@ -168,6 +169,7 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
    * Get the user name of the user who issued the query that is managed by this QueryContext.
    * @return
    */
+  @Override
   public String getQueryUserName() {
     return session.getCredentials().getUserName();
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/java-exec/src/main/java/org/apache/drill/exec/ops/ViewExpansionContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/ViewExpansionContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/ViewExpansionContext.java
index e5d565c..57c1a71 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/ViewExpansionContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/ViewExpansionContext.java
@@ -22,7 +22,10 @@ import static org.apache.drill.exec.ExecConstants.IMPERSONATION_MAX_CHAINED_USER
 import org.apache.calcite.plan.RelOptTable;
 import org.apache.calcite.plan.RelOptTable.ToRelContext;
 import org.apache.calcite.schema.SchemaPlus;
+import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.store.SchemaConfig.SchemaConfigInfoProvider;
 
 import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.google.common.base.Preconditions;
@@ -70,20 +73,25 @@ import com.google.common.base.Preconditions;
 public class ViewExpansionContext {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ViewExpansionContext.class);
 
-  private final QueryContext queryContext;
+  private final SchemaConfigInfoProvider schemaConfigInfoProvider;
   private final int maxChainedUserHops;
   private final String queryUser;
   private final ObjectIntHashMap<String> userTokens = new ObjectIntHashMap<>();
+  private final boolean impersonationEnabled;
 
   public ViewExpansionContext(QueryContext queryContext) {
-    this.queryContext = queryContext;
-    this.maxChainedUserHops =
-        queryContext.getConfig().getInt(IMPERSONATION_MAX_CHAINED_USER_HOPS);
-    this.queryUser = queryContext.getQueryUserName();
+    this(queryContext.getConfig(), queryContext);
+  }
+
+  public ViewExpansionContext(DrillConfig config, SchemaConfigInfoProvider schemaConfigInfoProvider) {
+    this.schemaConfigInfoProvider = schemaConfigInfoProvider;
+    this.maxChainedUserHops = config.getInt(IMPERSONATION_MAX_CHAINED_USER_HOPS);
+    this.queryUser = schemaConfigInfoProvider.getQueryUserName();
+    this.impersonationEnabled = config.getBoolean(ExecConstants.IMPERSONATION_ENABLED);
   }
 
   public boolean isImpersonationEnabled() {
-    return queryContext.isImpersonationEnabled();
+    return impersonationEnabled;
   }
 
   /**
@@ -160,7 +168,7 @@ public class ViewExpansionContext {
      */
     public SchemaPlus getSchemaTree() {
       Preconditions.checkState(!released, "Trying to use released token.");
-      return queryContext.getRootSchema(viewOwner);
+      return schemaConfigInfoProvider.getRootSchema(viewOwner);
     }
 
     /**

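With the hard QueryContext dependency removed, a ViewExpansionContext can now be built from any SchemaConfigInfoProvider (the interface gains getRootSchema/getQueryUserName in the SchemaConfig change below). A rough sketch of the wiring, mirroring the pattern the MetadataProvider change adopts later in this commit; the config, schema source and user name are placeholders:

    // Sketch: stand-alone provider, outside a running query.
    SchemaConfigInfoProvider infoProvider = new SchemaConfigInfoProvider() {
      private final ViewExpansionContext viewExpansionContext =
          new ViewExpansionContext(config, this);  // config: some DrillConfig

      @Override
      public ViewExpansionContext getViewExpansionContext() {
        return viewExpansionContext;
      }

      @Override
      public SchemaPlus getRootSchema(String userName) {
        return schemaTreeProvider.createRootSchema(userName, this);  // placeholder source
      }

      @Override
      public String getQueryUserName() {
        return "someUser";  // placeholder
      }

      @Override
      public OptionValue getOption(String optionKey) {
        return null;  // placeholder: real code would consult session/system options
      }
    };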
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaConfig.java
index 3e8f1c2..fa720f3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaConfig.java
@@ -17,13 +17,13 @@
  */
 package org.apache.drill.exec.store;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.drill.exec.ops.ViewExpansionContext;
 import org.apache.drill.exec.server.options.OptionValue;
 
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+
 /**
  * Contains information needed by {@link org.apache.drill.exec.store.AbstractSchema} implementations.
  */
@@ -100,6 +100,10 @@ public class SchemaConfig {
   public interface SchemaConfigInfoProvider {
     ViewExpansionContext getViewExpansionContext();
 
+    SchemaPlus getRootSchema(String userName);
+
+    String getQueryUserName();
+
     OptionValue getOption(String optionKey);
   }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaTreeProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaTreeProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaTreeProvider.java
index 4f426bb..5a8bfb2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaTreeProvider.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SchemaTreeProvider.java
@@ -17,6 +17,9 @@
  */
 package org.apache.drill.exec.store;
 
+import java.io.IOException;
+import java.util.List;
+
 import org.apache.calcite.jdbc.SimpleCalciteSchema;
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.drill.common.AutoCloseables;
@@ -31,9 +34,6 @@ import org.apache.drill.exec.util.ImpersonationUtil;
 
 import com.google.common.collect.Lists;
 
-import java.io.IOException;
-import java.util.List;
-
 /**
  * Class which creates new schema trees. It keeps track of newly created schema trees and closes them safely as
  * part of {@link #close()}.
@@ -69,6 +69,14 @@ public class SchemaTreeProvider implements AutoCloseable {
       public OptionValue getOption(String optionKey) {
         return options.getOption(optionKey);
       }
+
+      @Override public SchemaPlus getRootSchema(String userName) {
+        return createRootSchema(userName, this);
+      }
+
+      @Override public String getQueryUserName() {
+        return ImpersonationUtil.getProcessUserName();
+      }
     };
 
     final SchemaConfig schemaConfig = SchemaConfig.newBuilder(

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
index 2ff9bc6..49d1423 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
@@ -182,6 +182,18 @@ public class Records {
           this.INTERVAL_PRECISION = null;
           break;
 
+        case BOOLEAN:
+          this.COLUMN_SIZE = 1;
+          this.CHARACTER_MAXIMUM_LENGTH = null;
+          this.CHARACTER_OCTET_LENGTH = null;
+          this.NUMERIC_PRECISION = null;
+          this.NUMERIC_PRECISION_RADIX = null;
+          this.NUMERIC_SCALE = null;
+          this.DATETIME_PRECISION = null;
+          this.INTERVAL_TYPE = null;
+          this.INTERVAL_PRECISION = null;
+          break;
+
         case TINYINT:
         case SMALLINT:
         case INTEGER:

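The effect of the new BOOLEAN branch is visible from any client: BOOLEAN columns in INFORMATION_SCHEMA.COLUMNS should now report COLUMN_SIZE = 1 instead of null. A quick JDBC sketch (the connection URL is a placeholder):

    try (Connection conn = DriverManager.getConnection("jdbc:drill:zk=local");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT COLUMN_NAME, COLUMN_SIZE FROM INFORMATION_SCHEMA.COLUMNS "
             + "WHERE DATA_TYPE = 'BOOLEAN'")) {
      while (rs.next()) {
        System.out.println(rs.getString(1) + " -> " + rs.getInt(2));
      }
    }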
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
index 8365418..6ababf4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/metadata/MetadataProvider.java
@@ -34,6 +34,7 @@ import java.util.List;
 import java.util.UUID;
 
 import org.apache.calcite.schema.SchemaPlus;
+import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.ErrorHelper;
 import org.apache.drill.exec.ops.ViewExpansionContext;
 import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
@@ -148,6 +149,10 @@ public class MetadataProvider {
      * @return A {@link Response} message. Response must be returned in any case.
      */
     protected abstract Response runInternal(UserSession session, SchemaTreeProvider schemaProvider);
+
+    public DrillConfig getConfig() {
+      return dContext.getConfig();
+    }
   }
 
   /**
@@ -177,7 +182,7 @@ public class MetadataProvider {
 
       try {
         final PojoRecordReader<Catalog> records =
-            getPojoRecordReader(CATALOGS, filter, schemaProvider, session);
+            getPojoRecordReader(CATALOGS, filter, getConfig(), schemaProvider, session);
 
         List<CatalogMetadata> metadata = new ArrayList<>();
         for(Catalog c : records) {
@@ -233,7 +238,7 @@ public class MetadataProvider {
 
       try {
         final PojoRecordReader<Schema> records =
-            getPojoRecordReader(SCHEMATA, filter, schemaProvider, session);
+            getPojoRecordReader(SCHEMATA, filter, getConfig(), schemaProvider, session);
 
         List<SchemaMetadata> metadata = new ArrayList<>();
         for(Schema s : records) {
@@ -293,7 +298,7 @@ public class MetadataProvider {
 
       try {
         final PojoRecordReader<Table> records =
-            getPojoRecordReader(TABLES, filter, schemaProvider, session);
+            getPojoRecordReader(TABLES, filter, getConfig(), schemaProvider, session);
 
         List<TableMetadata> metadata = new ArrayList<>();
         for(Table t : records) {
@@ -354,7 +359,7 @@ public class MetadataProvider {
 
       try {
         final PojoRecordReader<Column> records =
-            getPojoRecordReader(COLUMNS, filter, schemaProvider, session);
+            getPojoRecordReader(COLUMNS, filter, getConfig(), schemaProvider, session);
 
         List<ColumnMetadata> metadata = new ArrayList<>();
         for(Column c : records) {
@@ -382,6 +387,10 @@ public class MetadataProvider {
             columnBuilder.setCharOctetLength(c.CHARACTER_OCTET_LENGTH);
           }
 
+          if (c.NUMERIC_SCALE != null) {
+            columnBuilder.setNumericScale(c.NUMERIC_SCALE);
+          }
+
           if (c.NUMERIC_PRECISION != null) {
             columnBuilder.setNumericPrecision(c.NUMERIC_PRECISION);
           }
@@ -531,30 +540,42 @@ public class MetadataProvider {
    * @param userSession
    * @return
    */
-  private static <S> PojoRecordReader<S> getPojoRecordReader(final InfoSchemaTableType tableType, final InfoSchemaFilter filter,
+  private static <S> PojoRecordReader<S> getPojoRecordReader(final InfoSchemaTableType tableType, final InfoSchemaFilter filter, final DrillConfig config,
       final SchemaTreeProvider provider, final UserSession userSession) {
     final SchemaPlus rootSchema =
-        provider.createRootSchema(userSession.getCredentials().getUserName(), newSchemaConfigInfoProvider(userSession));
+        provider.createRootSchema(userSession.getCredentials().getUserName(), newSchemaConfigInfoProvider(config, userSession, provider));
     return tableType.getRecordReader(rootSchema, filter, userSession.getOptions());
   }
 
   /**
    * Helper method to create a {@link SchemaConfigInfoProvider} instance for metadata purposes.
    * @param session
+   * @param schemaTreeProvider
    * @return
    */
-  private static SchemaConfigInfoProvider newSchemaConfigInfoProvider(final UserSession session) {
+  private static SchemaConfigInfoProvider newSchemaConfigInfoProvider(final DrillConfig config, final UserSession session, final SchemaTreeProvider schemaTreeProvider) {
     return new SchemaConfigInfoProvider() {
+      private final ViewExpansionContext viewExpansionContext = new ViewExpansionContext(config, this);
+
       @Override
       public ViewExpansionContext getViewExpansionContext() {
-        // Metadata APIs don't expect to expand the views.
-        throw new UnsupportedOperationException("View expansion context is not supported");
+        return viewExpansionContext;
+      }
+
+      @Override
+      public SchemaPlus getRootSchema(String userName) {
+        return schemaTreeProvider.createRootSchema(userName, this);
       }
 
       @Override
       public OptionValue getOption(String optionKey) {
         return session.getOptions().getOption(optionKey);
       }
+
+      @Override
+      public String getQueryUserName() {
+        return session.getCredentials().getUserName();
+      }
     };
   }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
index 55cb1ff..15f676c 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
@@ -67,7 +67,11 @@ public class DrillConnectionConfig extends ConnectionConfigImpl {
   }
 
   public boolean disableServerPreparedStatement() {
-    return Boolean.valueOf(props.getProperty("preparedstatement.server.disabled"));
+    return Boolean.valueOf(props.getProperty("server.preparedstatement.disabled"));
+  }
+
+  public boolean disableServerMetadata() {
+    return Boolean.valueOf(props.getProperty("server.metadata.disabled"));
   }
 
 }

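For reference, this is how the renamed and the new property would be supplied from JDBC code. A sketch, with the URL as a placeholder:

    Properties props = new Properties();
    // Renamed: was "preparedstatement.server.disabled" before this change.
    props.setProperty("server.preparedstatement.disabled", "true");
    // New: disable the server metadata API and fall back to SQL-based metadata.
    props.setProperty("server.metadata.disabled", "true");
    Connection conn = DriverManager.getConnection("jdbc:drill:zk=local", props);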
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java
index 830f137..94d5dd8 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java
@@ -25,6 +25,7 @@ import java.sql.Connection;
 import java.sql.DatabaseMetaData;
 import java.sql.NClob;
 import java.sql.PreparedStatement;
+import java.sql.ResultSet;
 import java.sql.SQLClientInfoException;
 import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
@@ -43,6 +44,7 @@ import org.apache.calcite.avatica.AvaticaConnection;
 import org.apache.calcite.avatica.AvaticaFactory;
 import org.apache.calcite.avatica.AvaticaStatement;
 import org.apache.calcite.avatica.Meta.ExecuteResult;
+import org.apache.calcite.avatica.Meta.MetaResultSet;
 import org.apache.calcite.avatica.UnregisteredDriver;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
@@ -165,6 +167,12 @@ class DrillConnectionImpl extends AvaticaConnection
     }
   }
 
+
+  @Override
+  protected ResultSet createResultSet(MetaResultSet metaResultSet) throws SQLException {
+    return super.createResultSet(metaResultSet);
+  }
+
   @Override
   protected ExecuteResult prepareAndExecuteInternal(AvaticaStatement statement, String sql, long maxRowCount)
       throws SQLException {

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
index 1c350f3..3d19f82 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
@@ -27,9 +27,12 @@ import java.sql.SQLFeatureNotSupportedException;
 import org.apache.calcite.avatica.AvaticaConnection;
 import org.apache.calcite.avatica.AvaticaDatabaseMetaData;
 import org.apache.drill.common.Version;
+import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.jdbc.AlreadyClosedSqlException;
 import org.apache.drill.jdbc.DrillDatabaseMetaData;
 
+import com.google.common.base.Throwables;
+
 
 /**
  * Drill's implementation of {@link DatabaseMetaData}.
@@ -818,20 +821,35 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
                              String tableNamePattern,
                              String[] types) throws SQLException {
     throwIfClosed();
-    return super.getTables(catalog, schemaPattern,tableNamePattern, types);
+    try {
+      return super.getTables(catalog, schemaPattern,tableNamePattern, types);
+    } catch(DrillRuntimeException e) {
+      Throwables.propagateIfInstanceOf(e.getCause(), SQLException.class);
+      throw e;
+    }
   }
 
 
   @Override
   public ResultSet getSchemas() throws SQLException {
     throwIfClosed();
-    return super.getSchemas();
+    try {
+      return super.getSchemas();
+    } catch(DrillRuntimeException e) {
+      Throwables.propagateIfInstanceOf(e.getCause(), SQLException.class);
+      throw e;
+    }
   }
 
   @Override
   public ResultSet getCatalogs() throws SQLException {
     throwIfClosed();
-    return super.getCatalogs();
+    try {
+      return super.getCatalogs();
+    } catch(DrillRuntimeException e) {
+      Throwables.propagateIfInstanceOf(e.getCause(), SQLException.class);
+      throw e;
+    }
   }
 
   @Override
@@ -844,7 +862,12 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   public ResultSet getColumns(String catalog, String schema, String table,
                               String columnNamePattern) throws SQLException {
     throwIfClosed();
-    return super.getColumns(catalog, schema, table, columnNamePattern);
+    try {
+      return super.getColumns(catalog, schema, table, columnNamePattern);
+    } catch(DrillRuntimeException e) {
+      Throwables.propagateIfInstanceOf(e.getCause(), SQLException.class);
+      throw e;
+    }
   }
 
   @Override

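Application code keeps using the standard JDBC surface; with this change, a server-side failure now reaches the caller as a SQLException instead of an unchecked DrillRuntimeException. A brief sketch, with the schema pattern as a placeholder:

    DatabaseMetaData md = conn.getMetaData();
    try (ResultSet tables = md.getTables(null, "dfs.%", "%",
        new String[] {"TABLE", "VIEW"})) {
      while (tables.next()) {
        System.out.println(tables.getString("TABLE_SCHEM")
            + "." + tables.getString("TABLE_NAME"));
      }
    }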
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
index 096b4f0..10d4225 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillMetaImpl.java
@@ -17,17 +17,48 @@
  */
 package org.apache.drill.jdbc.impl;
 
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.sql.DatabaseMetaData;
 import java.sql.SQLException;
+import java.sql.Time;
+import java.sql.Timestamp;
 import java.sql.Types;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
+
+import javax.validation.constraints.NotNull;
 
 import org.apache.calcite.avatica.AvaticaParameter;
 import org.apache.calcite.avatica.AvaticaStatement;
+import org.apache.calcite.avatica.AvaticaUtils;
+import org.apache.calcite.avatica.ColumnMetaData;
+import org.apache.calcite.avatica.ColumnMetaData.StructType;
+import org.apache.calcite.avatica.Meta;
 import org.apache.calcite.avatica.MetaImpl;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.util.DrillStringUtils;
+import org.apache.drill.exec.client.ServerMethod;
+import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
+import org.apache.drill.exec.proto.UserProtos.CatalogMetadata;
+import org.apache.drill.exec.proto.UserProtos.ColumnMetadata;
+import org.apache.drill.exec.proto.UserProtos.GetCatalogsResp;
+import org.apache.drill.exec.proto.UserProtos.GetColumnsResp;
+import org.apache.drill.exec.proto.UserProtos.GetSchemasResp;
+import org.apache.drill.exec.proto.UserProtos.GetTablesResp;
+import org.apache.drill.exec.proto.UserProtos.LikeFilter;
+import org.apache.drill.exec.proto.UserProtos.RequestStatus;
+import org.apache.drill.exec.proto.UserProtos.SchemaMetadata;
+import org.apache.drill.exec.proto.UserProtos.TableMetadata;
+import org.apache.drill.exec.rpc.DrillRpcFuture;
+import org.apache.drill.exec.rpc.RpcException;
+
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+
 
 
 class DrillMetaImpl extends MetaImpl {
@@ -61,7 +92,8 @@ class DrillMetaImpl extends MetaImpl {
         sql,
         Collections.<AvaticaParameter> emptyList(),
         Collections.<String, Object>emptyMap(),
-        CursorFactory.OBJECT);
+        null // CursorFactory set to null, as SQL requests use DrillCursor
+        );
   }
 
   private MetaResultSet s(String s) {
@@ -78,19 +110,237 @@ class DrillMetaImpl extends MetaImpl {
     }
   }
 
+  /** Information about type mapping. */
+  private static class TypeInfo {
+    private static final Map<Class<?>, TypeInfo> MAPPING = ImmutableMap.<Class<?>, TypeInfo> builder()
+        .put(boolean.class, of(Types.BOOLEAN, "BOOLEAN"))
+        .put(Boolean.class, of(Types.BOOLEAN, "BOOLEAN"))
+        .put(Byte.TYPE, of(Types.TINYINT, "TINYINT"))
+        .put(Byte.class, of(Types.TINYINT, "TINYINT"))
+        .put(Short.TYPE, of(Types.SMALLINT, "SMALLINT"))
+        .put(Short.class, of(Types.SMALLINT, "SMALLINT"))
+        .put(Integer.TYPE, of(Types.INTEGER, "INTEGER"))
+        .put(Integer.class, of(Types.INTEGER, "INTEGER"))
+        .put(Long.TYPE,  of(Types.BIGINT, "BIGINT"))
+        .put(Long.class, of(Types.BIGINT, "BIGINT"))
+        .put(Float.TYPE, of(Types.FLOAT, "FLOAT"))
+        .put(Float.class,  of(Types.FLOAT, "FLOAT"))
+        .put(Double.TYPE,  of(Types.DOUBLE, "DOUBLE"))
+        .put(Double.class, of(Types.DOUBLE, "DOUBLE"))
+        .put(String.class, of(Types.VARCHAR, "CHARACTER VARYING"))
+        .put(java.sql.Date.class, of(Types.DATE, "DATE"))
+        .put(Time.class, of(Types.TIME, "TIME"))
+        .put(Timestamp.class, of(Types.TIMESTAMP, "TIMESTAMP"))
+        .build();
+
+    private final int sqlType;
+    private final String sqlTypeName;
+
+    public TypeInfo(int sqlType, String sqlTypeName) {
+      this.sqlType = sqlType;
+      this.sqlTypeName = sqlTypeName;
+    }
 
+    private static TypeInfo of(int sqlType, String sqlTypeName) {
+      return new TypeInfo(sqlType, sqlTypeName);
+    }
 
-  @Override
-  protected <E> MetaResultSet createEmptyResultSet(Class<E> clazz) {
-    return s(
-        "SELECT '' AS `Interim zero-row result set` "  // dummy row type
-        + "FROM INFORMATION_SCHEMA.CATALOGS "          // any table
-        + "LIMIT 0"                                    // zero rows
-        );
+    public static TypeInfo get(Class<?> clazz) {
+      return MAPPING.get(clazz);
+    }
   }
 
-  @Override
-  public MetaResultSet getTables(String catalog, final Pat schemaPattern, final Pat tableNamePattern,
+  /** Metadata describing a column.
+   * Copied from Avatica, with several fixes.
+   */
+  public static class MetaColumn implements Named {
+    public final String tableCat;
+    public final String tableSchem;
+    public final String tableName;
+    public final String columnName;
+    public final int dataType;
+    public final String typeName;
+    public final Integer columnSize;
+    public final Integer bufferLength = null;
+    public final Integer decimalDigits;
+    public final Integer numPrecRadix;
+    public final int nullable;
+    public final String remarks = null;
+    public final String columnDef = null;
+    public final Integer sqlDataType = null;
+    public final Integer sqlDatetimeSub = null;
+    public final Integer charOctetLength;
+    public final int ordinalPosition;
+    @NotNull
+    public final String isNullable;
+    public final String scopeCatalog = null;
+    public final String scopeSchema = null;
+    public final String scopeTable = null;
+    public final Short sourceDataType = null;
+    @NotNull
+    public final String isAutoincrement = "";
+    @NotNull
+    public final String isGeneratedcolumn = "";
+
+    public MetaColumn(
+        String tableCat,
+        String tableSchem,
+        String tableName,
+        String columnName,
+        int dataType,
+        String typeName,
+        Integer columnSize,
+        Integer decimalDigits,
+        Integer numPrecRadix,
+        int nullable,
+        Integer charOctetLength,
+        int ordinalPosition,
+        String isNullable) {
+      this.tableCat = tableCat;
+      this.tableSchem = tableSchem;
+      this.tableName = tableName;
+      this.columnName = columnName;
+      this.dataType = dataType;
+      this.typeName = typeName;
+      this.columnSize = columnSize;
+      this.decimalDigits = decimalDigits;
+      this.numPrecRadix = numPrecRadix;
+      this.nullable = nullable;
+      this.charOctetLength = charOctetLength;
+      this.ordinalPosition = ordinalPosition;
+      this.isNullable = isNullable;
+    }
+
+    @Override
+    public String getName() {
+      return columnName;
+    }
+  }
+
+  private static LikeFilter newLikeFilter(final Pat pattern) {
+    if (pattern == null || pattern.s == null) {
+      return null;
+    }
+
+    return LikeFilter.newBuilder().setPattern(pattern.s).setEscape("\\").build();
+  }
+
+  /**
+   * Quotes the provided string as a LIKE pattern, escaping {@code %}, {@code _} and {@code \}.
+   *
+   * @param v the value to quote
+   * @return a LIKE pattern matching exactly {@code v}, or {@code null} if v is {@code null}
+   */
+  private static Pat quote(String v) {
+    if (v == null) {
+      return null;
+    }
+
+    StringBuilder sb = new StringBuilder(v.length());
+    for (int index = 0; index < v.length(); index++) {
+      char c = v.charAt(index);
+      switch (c) {
+      case '%':
+      case '_':
+      case '\\':
+        sb.append('\\').append(c);
+        break;
+
+      default:
+        sb.append(c);
+      }
+    }
+
+    return Pat.of(sb.toString());
+  }
+
+  // Overrides fieldMetaData because the Calcite version creates ColumnMetaData with an invalid offset
+  protected static ColumnMetaData.StructType drillFieldMetaData(Class<?> clazz) {
+    final List<ColumnMetaData> list = new ArrayList<>();
+    for (Field field : clazz.getFields()) {
+      if (Modifier.isPublic(field.getModifiers())
+          && !Modifier.isStatic(field.getModifiers())) {
+        NotNull notNull = field.getAnnotation(NotNull.class);
+        boolean notNullable = (notNull != null || field.getType().isPrimitive());
+        list.add(
+            drillColumnMetaData(
+                AvaticaUtils.camelToUpper(field.getName()),
+                list.size(), field.getType(), notNullable));
+      }
+    }
+    return ColumnMetaData.struct(list);
+  }
+
+
+  protected static ColumnMetaData drillColumnMetaData(String name, int index,
+      Class<?> type, boolean notNullable) {
+    TypeInfo pair = TypeInfo.get(type);
+    ColumnMetaData.Rep rep =
+        ColumnMetaData.Rep.VALUE_MAP.get(type);
+    ColumnMetaData.AvaticaType scalarType =
+        ColumnMetaData.scalar(pair.sqlType, pair.sqlTypeName, rep);
+    return new ColumnMetaData(
+        index, false, true, false, false,
+        notNullable
+            ? DatabaseMetaData.columnNoNulls
+            : DatabaseMetaData.columnNullable,
+        true, -1, name, name, null,
+        0, 0, null, null, scalarType, true, false, false,
+        scalarType.columnClassName());
+  }
+
+  private abstract class MetadataAdapter<CalciteMetaType, Response, ResponseValue> {
+    private final Class<? extends CalciteMetaType> clazz;
+
+    public MetadataAdapter(Class<? extends CalciteMetaType> clazz) {
+      this.clazz = clazz;
+    }
+
+    MetaResultSet getMeta(DrillRpcFuture<Response> future) {
+      Response response;
+      try {
+        response = future.checkedGet();
+      } catch (RpcException e) {
+        throw new DrillRuntimeException(new SQLException("Failure getting metadata", e));
+      }
+
+      // Check the response status and surface any server-side error
+      if (getStatus(response) != RequestStatus.OK) {
+        DrillPBError error = getError(response);
+        throw new DrillRuntimeException(new SQLException("Failure getting metadata: " + error.getMessage()));
+      }
+
+      try {
+        List<Object> rows = Lists.transform(getResult(response), new Function<ResponseValue, Object>() {
+          @Override
+          public Object apply(ResponseValue input) {
+            return adapt(input);
+          }
+        });
+
+        Meta.Frame frame = Meta.Frame.create(0, true, rows);
+        StructType fieldMetaData = drillFieldMetaData(clazz);
+        Meta.Signature signature = Meta.Signature.create(
+            fieldMetaData.columns, "",
+            Collections.<AvaticaParameter>emptyList(), CursorFactory.record(clazz));
+
+        AvaticaStatement statement = connection.createStatement();
+        return MetaResultSet.create(connection.id, statement.getId(), true,
+            signature, frame);
+      } catch (SQLException e) {
+        // Wrap in RuntimeException because Avatica's abstract method declarations
+        // didn't allow for SQLException!
+        throw new DrillRuntimeException(new SQLException("Failure while attempting to get DatabaseMetadata.", e));
+      }
+    }
+
+    protected abstract RequestStatus getStatus(Response response);
+    protected abstract DrillPBError getError(Response response);
+    protected abstract List<ResponseValue> getResult(Response response);
+    protected abstract CalciteMetaType adapt(ResponseValue protoValue);
+  }
+
+  private MetaResultSet clientGetTables(String catalog, final Pat schemaPattern, final Pat tableNamePattern,
       final List<String> typeList) {
     StringBuilder sb = new StringBuilder();
     sb.append("select "
@@ -134,11 +384,51 @@ class DrillMetaImpl extends MetaImpl {
     return s(sb.toString());
   }
 
+  private MetaResultSet serverGetTables(String catalog, final Pat schemaPattern, final Pat tableNamePattern,
+      final List<String> typeList) {
+    // Catalog is not a pattern
+    final LikeFilter catalogNameFilter = newLikeFilter(quote(catalog));
+    final LikeFilter schemaNameFilter = newLikeFilter(schemaPattern);
+    final LikeFilter tableNameFilter = newLikeFilter(tableNamePattern);
+
+    return new MetadataAdapter<MetaImpl.MetaTable, GetTablesResp, TableMetadata>(MetaTable.class) {
+
+      @Override
+      protected RequestStatus getStatus(GetTablesResp response) {
+        return response.getStatus();
+      }
+
+      @Override
+      protected DrillPBError getError(GetTablesResp response) {
+        return response.getError();
+      }
+
+      @Override
+      protected List<TableMetadata> getResult(GetTablesResp response) {
+        return response.getTablesList();
+      }
+
+      @Override
+      protected MetaImpl.MetaTable adapt(TableMetadata protoValue) {
+        return new MetaImpl.MetaTable(protoValue.getCatalogName(), protoValue.getSchemaName(), protoValue.getTableName(), protoValue.getType());
+      }
+    }.getMeta(connection.getClient().getTables(catalogNameFilter, schemaNameFilter, tableNameFilter, typeList));
+  }
+
   /**
-   * Implements {@link DatabaseMetaData#getColumns}.
+   * Implements {@link DatabaseMetaData#getTables}.
    */
   @Override
-  public MetaResultSet getColumns(String catalog, Pat schemaPattern,
+  public MetaResultSet getTables(String catalog, final Pat schemaPattern, final Pat tableNamePattern,
+      final List<String> typeList) {
+    if (connection.getConfig().disableServerMetadata()
+        || !connection.getClient().getSupportedMethods().contains(ServerMethod.GET_TABLES)) {
+      return clientGetTables(catalog, schemaPattern, tableNamePattern, typeList);
+    }
+
+    return serverGetTables(catalog, schemaPattern, tableNamePattern, typeList);
+  }
+
+  private MetaResultSet clientGetColumns(String catalog, Pat schemaPattern,
                               Pat tableNamePattern, Pat columnNamePattern) {
     StringBuilder sb = new StringBuilder();
     // TODO:  Resolve the various questions noted below.
@@ -257,6 +547,9 @@ class DrillMetaImpl extends MetaImpl {
          * characters needed to display a value).
          */
         + "\n  CASE DATA_TYPE "
+        // 0. "For boolean and bit ... 1":
+        + "\n    WHEN 'BOOLEAN', 'BIT'"
+        + "\n                         THEN 1 "
 
         // 1. "For numeric data, ... the maximum precision":
         + "\n    WHEN 'TINYINT', 'SMALLINT', 'INTEGER', 'BIGINT', "
@@ -410,8 +703,304 @@ class DrillMetaImpl extends MetaImpl {
     return s(sb.toString());
   }
 
+  private MetaResultSet serverGetColumns(String catalog, Pat schemaPattern,
+                              Pat tableNamePattern, Pat columnNamePattern) {
+    final LikeFilter catalogNameFilter = newLikeFilter(quote(catalog));
+    final LikeFilter schemaNameFilter = newLikeFilter(schemaPattern);
+    final LikeFilter tableNameFilter = newLikeFilter(tableNamePattern);
+    final LikeFilter columnNameFilter = newLikeFilter(columnNamePattern);
+
+    return new MetadataAdapter<MetaColumn, GetColumnsResp, ColumnMetadata>(MetaColumn.class) {
+      @Override
+      protected RequestStatus getStatus(GetColumnsResp response) {
+        return response.getStatus();
+      }
+
+      @Override
+      protected DrillPBError getError(GetColumnsResp response) {
+        return response.getError();
+      }
+
+      @Override
+      protected List<ColumnMetadata> getResult(GetColumnsResp response) {
+        return response.getColumnsList();
+      }
+
+      private int getDataType(ColumnMetadata value) {
+        switch (value.getDataType()) {
+        case "ARRAY":
+          return Types.ARRAY;
+
+        case "BIGINT":
+          return Types.BIGINT;
+        case "BINARY":
+          return Types.BINARY;
+        case "BINARY LARGE OBJECT":
+          return Types.BLOB;
+        case "BINARY VARYING":
+          return Types.VARBINARY;
+        case "BIT":
+          return Types.BIT;
+        case "BOOLEAN":
+          return Types.BOOLEAN;
+        case "CHARACTER":
+          return Types.CHAR;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "CHARACTER LARGE OBJECT":
+          return Types.CLOB;
+        case "CHARACTER VARYING":
+          return Types.VARCHAR;
+
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "DATALINK":
+          return Types.DATALINK;
+        case "DATE":
+          return Types.DATE;
+        case "DECIMAL":
+          return Types.DECIMAL;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "DISTINCT":
+          return Types.DISTINCT;
+        case "DOUBLE":
+        case "DOUBLE PRECISION":
+          return Types.DOUBLE;
+
+        case "FLOAT":
+          return Types.FLOAT;
+
+        case "INTEGER":
+          return Types.INTEGER;
+        case "INTERVAL":
+          return Types.OTHER;
+
+        // Resolve: Not seen in Drill yet. Can it ever appear?:
+        case "JAVA_OBJECT":
+          return Types.JAVA_OBJECT;
+
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "LONGNVARCHAR":
+          return Types.LONGNVARCHAR;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "LONGVARBINARY":
+          return Types.LONGVARBINARY;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "LONGVARCHAR":
+          return Types.LONGVARCHAR;
+
+        case "MAP":
+          return Types.OTHER;
+
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "NATIONAL CHARACTER":
+          return Types.NCHAR;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "NATIONAL CHARACTER LARGE OBJECT":
+          return Types.NCLOB;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "NATIONAL CHARACTER VARYING":
+          return Types.NVARCHAR;
+
+        // TODO: Resolve following about NULL (and then update comment and
+        // code):
+        // It is not clear whether Types.NULL can represent a type (perhaps the
+        // type of the literal NULL when no further type information is known?)
+        // or
+        // whether 'NULL' can appear in INFORMATION_SCHEMA.COLUMNS.DATA_TYPE.
+        // For now, since it shouldn't hurt, include 'NULL'/Types.NULL in
+        // mapping.
+        case "NULL":
+          return Types.NULL;
+        // (No NUMERIC--Drill seems to map any to DECIMAL currently.)
+        case "NUMERIC":
+          return Types.NUMERIC;
+
+        // Resolve: Unexpectedly, has appeared in Drill. Should it?
+        case "OTHER":
+          return Types.OTHER;
+
+        case "REAL":
+          return Types.REAL;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "REF":
+          return Types.REF;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "ROWID":
+          return Types.ROWID;
+
+        case "SMALLINT":
+          return Types.SMALLINT;
+        // Resolve: Not seen in Drill yet. Can it appear?:
+        case "SQLXML":
+          return Types.SQLXML;
+        case "STRUCT":
+          return Types.STRUCT;
+
+        case "TIME":
+          return Types.TIME;
+        case "TIMESTAMP":
+          return Types.TIMESTAMP;
+        case "TINYINT":
+          return Types.TINYINT;
+
+        default:
+          return Types.OTHER;
+        }
+      }
+
+      Integer getDecimalDigits(ColumnMetadata value) {
+        switch(value.getDataType()) {
+        case "TINYINT":
+        case "SMALLINT":
+        case "INTEGER":
+        case "BIGINT":
+        case "DECIMAL":
+        case "NUMERIC":
+          return value.hasNumericScale() ? value.getNumericScale() : null;
+
+        case "REAL":
+          return DECIMAL_DIGITS_REAL;
+
+        case "FLOAT":
+          return DECIMAL_DIGITS_FLOAT;
+
+        case "DOUBLE":
+          return DECIMAL_DIGITS_DOUBLE;
+
+        case "DATE":
+        case "TIME":
+        case "TIMESTAMP":
+        case "INTERVAL":
+          return value.getDateTimePrecision();
+
+        default:
+          return null;
+        }
+      }
+
+      private Integer getNumPrecRadix(ColumnMetadata value) {
+        switch(value.getDataType()) {
+        case "TINYINT":
+        case "SMALLINT":
+        case "INTEGER":
+        case "BIGINT":
+        case "DECIMAL":
+        case "NUMERIC":
+        case "REAL":
+        case "FLOAT":
+        case "DOUBLE":
+          return value.getNumericPrecisionRadix();
+
+        case "INTERVAL":
+          return RADIX_INTERVAL;
+
+        case "DATE":
+        case "TIME":
+        case "TIMESTAMP":
+          return RADIX_DATETIME;
+
+        default:
+          return null;
+        }
+      }
+
+      private int getNullable(ColumnMetadata value) {
+        if (!value.hasIsNullable()) {
+          return DatabaseMetaData.columnNullableUnknown;
+        }
+        return  value.getIsNullable() ? DatabaseMetaData.columnNullable : DatabaseMetaData.columnNoNulls;
+      }
+
+      private String getIsNullable(ColumnMetadata value) {
+        if (!value.hasIsNullable()) {
+          return "";
+        }
+        return  value.getIsNullable() ? "YES" : "NO";
+      }
+
+      private Integer getCharOctetLength(ColumnMetadata value) {
+        if (!value.hasCharMaxLength()) {
+          return null;
+        }
+
+        switch(value.getDataType()) {
+        case "CHARACTER":
+        case "CHARACTER LARGE OBJECT":
+        case "CHARACTER VARYING":
+        case "LONGVARCHAR":
+        case "LONGNVARCHAR":
+        case "NATIONAL CHARACTER":
+        case "NATIONAL CHARACTER LARGE OBJECT":
+        case "NATIONAL CHARACTER VARYING":
+          return value.getCharOctetLength();
+
+        default:
+          return null;
+        }
+      }
+
+      @Override
+      protected MetaColumn adapt(ColumnMetadata value) {
+        return new MetaColumn(
+            value.getCatalogName(),
+            value.getSchemaName(),
+            value.getTableName(),
+            value.getColumnName(),
+            getDataType(value), // It might require the full SQL type
+            value.getDataType(),
+            value.getColumnSize(),
+            getDecimalDigits(value),
+            getNumPrecRadix(value),
+            getNullable(value),
+            getCharOctetLength(value),
+            value.getOrdinalPosition(),
+            getIsNullable(value));
+      }
+    }.getMeta(connection.getClient().getColumns(catalogNameFilter, schemaNameFilter, tableNameFilter, columnNameFilter));
+  }
+
+  /**
+   * Implements {@link DatabaseMetaData#getColumns}.
+   */
   @Override
-  public MetaResultSet getSchemas(String catalog, Pat schemaPattern) {
+  public MetaResultSet getColumns(String catalog, Pat schemaPattern,
+                              Pat tableNamePattern, Pat columnNamePattern) {
+    if (connection.getConfig().disableServerMetadata()
+        || !connection.getClient().getSupportedMethods().contains(ServerMethod.GET_COLUMNS)) {
+      return clientGetColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern);
+    }
+
+    return serverGetColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern);
+  }
+
+
+  private MetaResultSet serverGetSchemas(String catalog, Pat schemaPattern) {
+    final LikeFilter catalogNameFilter = newLikeFilter(quote(catalog));
+    final LikeFilter schemaNameFilter = newLikeFilter(schemaPattern);
+
+    return new MetadataAdapter<MetaImpl.MetaSchema, GetSchemasResp, SchemaMetadata>(MetaImpl.MetaSchema.class) {
+      @Override
+      protected RequestStatus getStatus(GetSchemasResp response) {
+        return response.getStatus();
+      }
+
+      @Override
+      protected List<SchemaMetadata> getResult(GetSchemasResp response) {
+        return response.getSchemasList();
+      }
+
+      @Override
+      protected DrillPBError getError(GetSchemasResp response) {
+        return response.getError();
+      }
+
+      @Override
+      protected MetaSchema adapt(SchemaMetadata value) {
+        return new MetaImpl.MetaSchema(value.getCatalogName(), value.getSchemaName());
+      }
+    }.getMeta(connection.getClient().getSchemas(catalogNameFilter, schemaNameFilter));
+  }
+
+
+  private MetaResultSet clientGetSchemas(String catalog, Pat schemaPattern) {
     StringBuilder sb = new StringBuilder();
     sb.append("select "
         + "SCHEMA_NAME as TABLE_SCHEM, "
@@ -429,8 +1018,43 @@ class DrillMetaImpl extends MetaImpl {
     return s(sb.toString());
   }
 
+  /**
+   * Implements {@link DatabaseMetaData#getSchemas}.
+   */
   @Override
-  public MetaResultSet getCatalogs() {
+  public MetaResultSet getSchemas(String catalog, Pat schemaPattern) {
+    if (connection.getConfig().disableServerMetadata()
+        || !connection.getClient().getSupportedMethods().contains(ServerMethod.GET_SCHEMAS)) {
+      return clientGetSchemas(catalog, schemaPattern);
+    }
+
+    return serverGetSchemas(catalog, schemaPattern);
+  }
+
+  private MetaResultSet serverGetCatalogs() {
+    return new MetadataAdapter<MetaImpl.MetaCatalog, GetCatalogsResp, CatalogMetadata>(MetaImpl.MetaCatalog.class) {
+      @Override
+      protected RequestStatus getStatus(GetCatalogsResp response) {
+        return response.getStatus();
+      }
+
+      @Override
+      protected List<CatalogMetadata> getResult(GetCatalogsResp response) {
+        return response.getCatalogsList();
+      }
+
+      @Override
+      protected DrillPBError getError(GetCatalogsResp response) {
+        return response.getError();
+      }
+
+      @Override
+      protected MetaImpl.MetaCatalog adapt(CatalogMetadata protoValue) {
+        return new MetaImpl.MetaCatalog(protoValue.getCatalogName());
+      }
+    }.getMeta(connection.getClient().getCatalogs(null));
+  }
+
+  private MetaResultSet clientGetCatalogs() {
     StringBuilder sb = new StringBuilder();
     sb.append("select "
         + "CATALOG_NAME as TABLE_CAT "
@@ -441,6 +1065,17 @@ class DrillMetaImpl extends MetaImpl {
     return s(sb.toString());
   }
 
+  /**
+   * Implements {@link DatabaseMetaData#getCatalogs}.
+   */
+  @Override
+  public MetaResultSet getCatalogs() {
+    if (connection.getConfig().disableServerMetadata() || ! connection.getClient().getSupportedMethods().contains(ServerMethod.GET_CATALOGS)) {
+      return clientGetCatalogs();
+    }
+
+    return serverGetCatalogs();
+  }
 
   interface Named {
     String getName();

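For reference, the LIKE-pattern escaping performed by the quote(...) helper added to DrillMetaImpl above can be exercised in isolation. The sketch below is illustrative only (the standalone class and main() are not part of this commit); it relies on Avatica's Meta.Pat, which exposes the pattern string as the public field s:

    import org.apache.calcite.avatica.Meta;

    public class LikeQuoteSketch {
      // Same rule as DrillMetaImpl.quote(): prefix '%', '_' and '\' with the
      // escape character '\' so the resulting pattern matches v literally.
      static Meta.Pat quote(String v) {
        if (v == null) {
          return null;
        }
        StringBuilder sb = new StringBuilder(v.length());
        for (int i = 0; i < v.length(); i++) {
          char c = v.charAt(i);
          if (c == '%' || c == '_' || c == '\\') {
            sb.append('\\');
          }
          sb.append(c);
        }
        return Meta.Pat.of(sb.toString());
      }

      public static void main(String[] args) {
        System.out.println(quote("MY_TABLE").s); // prints MY\_TABLE
        System.out.println(quote("100%").s);     // prints 100\%
      }
    }

Combined with newLikeFilter(...), which sets the escape character to a backslash, this guarantees that a non-pattern argument such as a catalog name is matched exactly by the server-side filter.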
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
index e406348..c8b4e3d 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillResultSetImpl.java
@@ -37,7 +37,9 @@ import java.sql.SQLXML;
 import java.sql.Time;
 import java.sql.Timestamp;
 import java.sql.Types;
+import java.util.ArrayList;
 import java.util.Calendar;
+import java.util.List;
 import java.util.Map;
 import java.util.TimeZone;
 
@@ -47,6 +49,7 @@ import org.apache.calcite.avatica.AvaticaStatement;
 import org.apache.calcite.avatica.ColumnMetaData;
 import org.apache.calcite.avatica.Meta;
 import org.apache.calcite.avatica.util.Cursor;
+import org.apache.calcite.avatica.util.Cursor.Accessor;
 import org.apache.drill.jdbc.AlreadyClosedSqlException;
 import org.apache.drill.jdbc.DrillResultSet;
 import org.apache.drill.jdbc.ExecutionCanceledSqlException;
@@ -1874,12 +1877,23 @@ class DrillResultSetImpl extends AvaticaResultSet implements DrillResultSet {
   protected DrillResultSetImpl execute() throws SQLException{
     connection.getDriver().handler.onStatementExecute(statement, null);
 
-    DrillCursor drillCursor = new DrillCursor(connection, statement, signature);
-    super.execute2(drillCursor, this.signature.columns);
+    if (signature.cursorFactory != null) {
+      // Avatica accessors have to be wrapped to match Drill's behaviour for conversion exceptions
+      super.execute();
+      List<Accessor> wrappedAccessorList = new ArrayList<>(accessorList.size());
+      for(Accessor accessor: accessorList) {
+        wrappedAccessorList.add(new WrappedAccessor(accessor));
+      }
+      this.accessorList = wrappedAccessorList;
+    } else {
+      DrillCursor drillCursor = new DrillCursor(connection, statement, signature);
+      super.execute2(drillCursor, this.signature.columns);
 
-    // Read first (schema-only) batch to initialize result-set metadata from
-    // (initial) schema before Statement.execute...(...) returns result set:
-    drillCursor.loadInitialSchema();
+      // Read first (schema-only) batch to initialize result-set metadata from
+      // (initial) schema before Statement.execute...(...) returns result set:
+      drillCursor.loadInitialSchema();
+    }
 
     return this;
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/WrappedAccessor.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/WrappedAccessor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/WrappedAccessor.java
new file mode 100644
index 0000000..4cdc2ae
--- /dev/null
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/WrappedAccessor.java
@@ -0,0 +1,448 @@
+/*******************************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+
+package org.apache.drill.jdbc.impl;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.Ref;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+import java.util.Map;
+
+import org.apache.calcite.avatica.util.Cursor.Accessor;
+
+/**
+ * Wraps Avatica {@code Accessor} instances to catch conversion exceptions,
+ * which Avatica throws as {@code RuntimeException}, and rethrow them as
+ * {@code SQLException} instead.
+ *
+ */
+class WrappedAccessor implements Accessor {
+  private final Accessor delegate;
+
+  public WrappedAccessor(Accessor delegate) {
+    this.delegate = delegate;
+  }
+
+  @Override
+  public boolean wasNull() throws SQLException {
+    return delegate.wasNull();
+  }
+
+  @Override
+  public String getString() throws SQLException {
+    try {
+      return delegate.getString();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public boolean getBoolean() throws SQLException {
+    try {
+      return delegate.getBoolean();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public byte getByte() throws SQLException {
+    try {
+      return delegate.getByte();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public short getShort() throws SQLException {
+    try {
+      return delegate.getShort();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public int getInt() throws SQLException {
+    try {
+      return delegate.getInt();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public long getLong() throws SQLException {
+    try {
+      return delegate.getLong();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public float getFloat() throws SQLException {
+    try {
+      return delegate.getFloat();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public double getDouble() throws SQLException {
+    try {
+      return delegate.getDouble();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public BigDecimal getBigDecimal() throws SQLException {
+    try {
+      return delegate.getBigDecimal();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public BigDecimal getBigDecimal(int scale) throws SQLException {
+    try {
+      return delegate.getBigDecimal(scale);
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public byte[] getBytes() throws SQLException {
+    try {
+      return delegate.getBytes();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public InputStream getAsciiStream() throws SQLException {
+    try {
+      return delegate.getAsciiStream();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public InputStream getUnicodeStream() throws SQLException {
+    try {
+      return delegate.getUnicodeStream();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public InputStream getBinaryStream() throws SQLException {
+    try {
+      return delegate.getBinaryStream();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Object getObject() throws SQLException {
+    try {
+      return delegate.getObject();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Reader getCharacterStream() throws SQLException {
+    try {
+      return delegate.getCharacterStream();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Object getObject(Map<String, Class<?>> map) throws SQLException {
+    try {
+      return delegate.getObject(map);
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Ref getRef() throws SQLException {
+    try {
+      return delegate.getRef();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Blob getBlob() throws SQLException {
+    try {
+      return delegate.getBlob();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Clob getClob() throws SQLException {
+    try {
+      return delegate.getClob();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Array getArray() throws SQLException {
+    try {
+      return delegate.getArray();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Date getDate(Calendar calendar) throws SQLException {
+    try {
+      return delegate.getDate(calendar);
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Time getTime(Calendar calendar) throws SQLException {
+    try {
+      return delegate.getTime(calendar);
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Timestamp getTimestamp(Calendar calendar) throws SQLException {
+    try {
+      return delegate.getTimestamp(calendar);
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public URL getURL() throws SQLException {
+    try {
+      return delegate.getURL();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public NClob getNClob() throws SQLException {
+    try {
+      return delegate.getNClob();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public SQLXML getSQLXML() throws SQLException {
+    try {
+      return delegate.getSQLXML();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public String getNString() throws SQLException {
+    try {
+      return delegate.getNString();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public Reader getNCharacterStream() throws SQLException {
+    try {
+      return delegate.getNCharacterStream();
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+  @Override
+  public <T> T getObject(Class<T> type) throws SQLException {
+    try {
+      return delegate.getObject(type);
+    } catch(RuntimeException e) {
+      String message = e.getMessage();
+      if (message != null && message.startsWith("cannot convert to")) {
+        throw new SQLException(e.getMessage(), e);
+      }
+      throw e;
+    }
+  }
+
+}

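Each getter in WrappedAccessor above repeats the identical try/catch. If the boilerplate ever needed trimming, the pattern could be factored through a small throwing functional interface; the following is a hypothetical refactoring sketch, not part of this commit:

    import java.sql.SQLException;

    final class ConversionGuard {
      @FunctionalInterface
      interface SqlCall<T> {
        T get() throws SQLException;
      }

      // Mirrors the catch blocks in WrappedAccessor: Avatica signals failed
      // conversions with RuntimeExceptions whose message starts with
      // "cannot convert to"; those are rethrown as SQLException.
      static <T> T guard(SqlCall<T> call) throws SQLException {
        try {
          return call.get();
        } catch (RuntimeException e) {
          String message = e.getMessage();
          if (message != null && message.startsWith("cannot convert to")) {
            throw new SQLException(message, e);
          }
          throw e;
        }
      }
    }

With this helper a wrapped getter collapses to a one-liner, e.g. return ConversionGuard.guard(delegate::getString);. Primitive-returning getters would incur boxing, and the patch otherwise sticks to anonymous classes rather than lambdas, which likely explains why the commit spells each method out.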
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
index bca6325..8e65869 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataGetColumnsTest.java
@@ -88,14 +88,14 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
       DatabaseMetaDataGetColumnsTest.class.getSimpleName() + "_View";
 
   /** The one shared JDBC connection to Drill. */
-  private static Connection connection;
+  protected static Connection connection;
 
   /** Overall (connection-level) metadata. */
-  private static DatabaseMetaData dbMetadata;
+  protected static DatabaseMetaData dbMetadata;
 
   /** getColumns result metadata.  For checking columns themselves (not cell
    *  values or row order). */
-  private static ResultSetMetaData rowsMetadata;
+  protected static ResultSetMetaData rowsMetadata;
 
 
   ////////////////////
@@ -181,8 +181,7 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
   }
 
   @BeforeClass
-  public static void setUpConnectionAndMetadataToCheck() throws Exception {
-
+  public static void setUpConnection() throws Exception {
     // Get JDBC connection to Drill:
     // (Note: Can't use JdbcTest's connect(...) because JdbcTest closes
     // Connection--and other JDBC objects--on test method failure, but this test
@@ -190,6 +189,11 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
     connection = new Driver().connect( "jdbc:drill:zk=local",
                                        JdbcAssert.getDefaultProperties() );
     dbMetadata = connection.getMetaData();
+
+    setUpMetadataToCheck();
+  }
+
+  protected static void setUpMetadataToCheck() throws Exception {
     final Statement stmt = connection.createStatement();
 
     ResultSet util;
@@ -346,7 +350,7 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
 
   @AfterClass
   public static void tearDownConnection() throws SQLException {
-    final ResultSet util =
+    ResultSet util =
         connection.createStatement().executeQuery( "DROP VIEW " + VIEW_NAME + "" );
     assertTrue( util.next() );
     assertTrue( "Error dropping temporary test-columns view " + VIEW_NAME + ": "
@@ -960,7 +964,7 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
 
   @Test
   public void test_COLUMN_SIZE_hasRightValue_mdrOptBOOLEAN() throws SQLException {
-    assertThat( getIntOrNull( mdrOptBOOLEAN, "COLUMN_SIZE" ), nullValue() );
+    assertThat( getIntOrNull( mdrOptBOOLEAN, "COLUMN_SIZE" ), equalTo(1) );
   }
 
   @Ignore( "TODO(DRILL-2470): unignore when TINYINT is implemented" )
@@ -2702,7 +2706,7 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
 
   @Test
   public void test_SOURCE_DATA_TYPE_hasRightValue_mdrOptBOOLEAN() throws SQLException {
-    assertThat( getIntOrNull( mdrOptBOOLEAN, "SOURCE_DATA_TYPE" ), nullValue() );
+    assertThat( mdrOptBOOLEAN.getString( "SOURCE_DATA_TYPE" ), nullValue() );
   }
 
   @Test
@@ -2712,22 +2716,18 @@ public class DatabaseMetaDataGetColumnsTest extends JdbcTestBase {
 
   @Test
   public void test_SOURCE_DATA_TYPE_hasRightTypeString() throws SQLException {
-    // TODO(DRILL-2135):  Resolve workaround:
-    //assertThat( rsMetadata.getColumnTypeName( 22 ), equalTo( "SMALLINT" ) );
-    assertThat( rowsMetadata.getColumnTypeName( 22 ), equalTo( "INTEGER" ) );
+    assertThat( rowsMetadata.getColumnTypeName( 22 ), equalTo( "SMALLINT" ) );
   }
 
   @Test
   public void test_SOURCE_DATA_TYPE_hasRightTypeCode() throws SQLException {
-    // TODO(DRILL-2135):  Resolve workaround:
-    //assertThat( rsMetadata.getColumnType( 22 ), equalTo( Types.SMALLINT ) );
-    assertThat( rowsMetadata.getColumnType( 22 ), equalTo( Types.INTEGER ) );
+    assertThat( rowsMetadata.getColumnType( 22 ), equalTo( Types.SMALLINT ) );
   }
 
   @Test
   public void test_SOURCE_DATA_TYPE_hasRightClass() throws SQLException {
     assertThat( rowsMetadata.getColumnClassName( 22 ),
-                equalTo( Integer.class.getName() ) );
+                equalTo( Short.class.getName() ) );
   }
 
   @Test

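The updated assertions above reflect that, with server-side metadata, column 22 (SOURCE_DATA_TYPE) of a getColumns result is typed SMALLINT as the JDBC javadoc specifies, whereas the legacy client-side path reports INTEGER (see LegacyDatabaseMetaDataGetColumnsTest below). A minimal check against a live connection, assuming an open connection as in the test setup (class and method names here are illustrative):

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.SQLException;
    import java.sql.Types;

    class SourceDataTypeCheck {
      // Run with -ea; `conn` is assumed to be an open Drill connection.
      static void check(Connection conn) throws SQLException {
        ResultSet cols = conn.getMetaData().getColumns(null, null, "%", "%");
        ResultSetMetaData md = cols.getMetaData();
        assert md.getColumnType(22) == Types.SMALLINT;
        assert "SMALLINT".equals(md.getColumnTypeName(22));
        cols.close();
      }
    }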
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataTest.java
index 738f1a2..0ec5c8b 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataTest.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/DatabaseMetaDataTest.java
@@ -17,25 +17,25 @@
  */
 package org.apache.drill.jdbc;
 
-import static org.junit.Assert.assertTrue;
+import static java.sql.Connection.TRANSACTION_NONE;
+import static java.sql.Connection.TRANSACTION_READ_COMMITTED;
+import static java.sql.Connection.TRANSACTION_READ_UNCOMMITTED;
+import static java.sql.Connection.TRANSACTION_REPEATABLE_READ;
+import static java.sql.Connection.TRANSACTION_SERIALIZABLE;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
 import static org.junit.Assert.assertThat;
-import static org.hamcrest.CoreMatchers.*;
-
-import org.apache.drill.jdbc.Driver;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
+import static org.junit.Assert.assertTrue;
 
-import static java.sql.Connection.*;
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
-import java.sql.SQLFeatureNotSupportedException;
-import java.sql.Savepoint;
 import java.sql.SQLException;
 
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
 /**
  * Test for Drill's implementation of DatabaseMetaData's methods (other than
  * those tested separately, e.g., {@code getColumn(...)}, tested in
@@ -43,8 +43,8 @@ import java.sql.SQLException;
  */
 public class DatabaseMetaDataTest {
 
-  private static Connection connection;
-  private static DatabaseMetaData dbmd;
+  protected static Connection connection;
+  protected static DatabaseMetaData dbmd;
 
   @BeforeClass
   public static void setUpConnection() throws SQLException {

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataGetColumnsTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataGetColumnsTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataGetColumnsTest.java
new file mode 100644
index 0000000..fbd9379
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataGetColumnsTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.Properties;
+
+import org.apache.drill.jdbc.test.JdbcAssert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests compatibility with older versions of the server.
+ */
+public class LegacyDatabaseMetaDataGetColumnsTest extends DatabaseMetaDataGetColumnsTest {
+
+  @BeforeClass
+  public static void setUpConnection() throws Exception {
+    // Get JDBC connection to Drill:
+    // (Note: Can't use JdbcTest's connect(...) because JdbcTest closes
+    // Connection--and other JDBC objects--on test method failure, but this test
+    // class uses some objects across methods.)
+    Properties defaultProperties = JdbcAssert.getDefaultProperties();
+    defaultProperties.setProperty("server.metadata.disabled", "true");
+
+    connection = new Driver().connect( "jdbc:drill:zk=local",
+                                       defaultProperties );
+    dbMetadata = connection.getMetaData();
+
+    DatabaseMetaDataGetColumnsTest.setUpMetadataToCheck();
+  }
+
+
+  // Overridden because of DRILL-1959: legacy servers report SOURCE_DATA_TYPE as INTEGER
+
+  @Override
+  @Test
+  public void test_SOURCE_DATA_TYPE_hasRightTypeString() throws SQLException {
+    assertThat( rowsMetadata.getColumnTypeName( 22 ), equalTo( "INTEGER" ) );
+  }
+
+  @Override
+  @Test
+  public void test_SOURCE_DATA_TYPE_hasRightTypeCode() throws SQLException {
+    assertThat( rowsMetadata.getColumnType( 22 ), equalTo( Types.INTEGER ) );
+  }
+
+  @Override
+  @Test
+  public void test_SOURCE_DATA_TYPE_hasRightClass() throws SQLException {
+    assertThat( rowsMetadata.getColumnClassName( 22 ),
+                equalTo( Integer.class.getName() ) );
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataTest.java
new file mode 100644
index 0000000..ba5b700
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyDatabaseMetaDataTest.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import java.sql.SQLException;
+import java.util.Properties;
+
+import org.junit.BeforeClass;
+
+/**
+ * Tests compatibility with older versions of the server.
+ */
+public class LegacyDatabaseMetaDataTest extends DatabaseMetaDataTest {
+  @BeforeClass
+  public static void setUpConnection() throws SQLException {
+    Properties properties = new Properties();
+    properties.setProperty("server.metadata.disabled", "true");
+    // (Note: Can't use JdbcTest's connect(...) because JdbcTest closes
+    // Connection--and other JDBC objects--on test method failure, but this test
+    // class uses some objects across methods.)
+    connection = new Driver().connect( "jdbc:drill:zk=local", properties );
+    dbmd = connection.getMetaData();
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java
index 46d675f..b482835 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java
@@ -57,9 +57,11 @@ public class LegacyPreparedStatementTest extends JdbcTestBase {
   public static void setUpConnection() throws SQLException {
     Driver.load();
     Properties properties = new Properties();
-    properties.setProperty("preparedstatement.server.disabled", "true");
+    properties.setProperty("server.preparedstatement.disabled", "true");
 
     connection = DriverManager.getConnection( "jdbc:drill:zk=local", properties);
+    assertTrue(((DrillConnection) connection).getConfig().isServerPreparedStatementDisabled());
+
   }
 
   @AfterClass

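Note the property rename exercised above: the effective connection option is server.preparedstatement.disabled; the previous spelling, preparedstatement.server.disabled, did not match the actual option name, which the added isServerPreparedStatementDisabled() assertion now guards against. A short usage sketch (URL as in the test; class name illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    class LegacyPreparedStatementConnect {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("server.preparedstatement.disabled", "true");
        // Prepared statements are created client-side, as with older servers.
        Connection conn = DriverManager.getConnection("jdbc:drill:zk=local", props);
        conn.close();
      }
    }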
http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcMetadata.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcMetadata.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcMetadata.java
index 6d766bd..b859650 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcMetadata.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcMetadata.java
@@ -37,6 +37,7 @@ public class TestJdbcMetadata extends JdbcTestActionBase {
   @Test
   public void catalogs() throws Exception{
     this.testAction(new JdbcAction(){
+      @Override
       public ResultSet getResult(Connection c) throws SQLException {
         return c.getMetaData().getCatalogs();
       }
@@ -46,6 +47,7 @@ public class TestJdbcMetadata extends JdbcTestActionBase {
   @Test
   public void allSchemas() throws Exception{
     this.testAction(new JdbcAction(){
+      @Override
       public ResultSet getResult(Connection c) throws SQLException {
         return c.getMetaData().getSchemas();
       }
@@ -55,6 +57,7 @@ public class TestJdbcMetadata extends JdbcTestActionBase {
   @Test
   public void schemasWithConditions() throws Exception{
     this.testAction(new JdbcAction(){
+      @Override
       public ResultSet getResult(Connection c) throws SQLException {
         return c.getMetaData().getSchemas("DRILL", "%fs%");
       }
@@ -64,6 +67,7 @@ public class TestJdbcMetadata extends JdbcTestActionBase {
   @Test
   public void allTables() throws Exception{
     this.testAction(new JdbcAction(){
+      @Override
       public ResultSet getResult(Connection c) throws SQLException {
         return c.getMetaData().getTables(null, null, null, null);
       }
@@ -73,6 +77,7 @@ public class TestJdbcMetadata extends JdbcTestActionBase {
   @Test
   public void tablesWithConditions() throws Exception{
     this.testAction(new JdbcAction(){
+      @Override
       public ResultSet getResult(Connection c) throws SQLException {
         return c.getMetaData().getTables("DRILL", "sys", "opt%", new String[]{"SYSTEM_TABLE", "SYSTEM_VIEW"});
       }
@@ -82,6 +87,7 @@ public class TestJdbcMetadata extends JdbcTestActionBase {
   @Test
   public void allColumns() throws Exception{
     this.testAction(new JdbcAction(){
+      @Override
       public ResultSet getResult(Connection c) throws SQLException {
         return c.getMetaData().getColumns(null, null, null, null);
       }
@@ -91,6 +97,7 @@ public class TestJdbcMetadata extends JdbcTestActionBase {
   @Test
   public void columnsWithConditions() throws Exception{
     this.testAction(new JdbcAction(){
+      @Override
       public ResultSet getResult(Connection c) throws SQLException {
         return c.getMetaData().getColumns("DRILL", "sys", "opt%", "%ame");
       }

http://git-wip-us.apache.org/repos/asf/drill/blob/17f888d9/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestLegacyJdbcMetadata.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestLegacyJdbcMetadata.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestLegacyJdbcMetadata.java
new file mode 100644
index 0000000..97f7931
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestLegacyJdbcMetadata.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc.test;
+
+import java.sql.DriverManager;
+import java.util.Properties;
+
+import org.junit.BeforeClass;
+
+/**
+ * Tests compatibility with older versions of the server.
+ */
+public class TestLegacyJdbcMetadata extends TestJdbcMetadata {
+  @BeforeClass
+  public static void openClient() throws Exception {
+    Properties defaultProperties = JdbcAssert.getDefaultProperties();
+    defaultProperties.setProperty("server.metadata.disabled", "true");
+
+    connection = DriverManager.getConnection("jdbc:drill:zk=local", defaultProperties);
+  }
+}

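Both legacy test variants flip the same switch. For completeness, a sketch of a connection that bypasses the new server-side metadata RPCs, so that getCatalogs()/getSchemas()/getTables()/getColumns() fall back to the client-side INFORMATION_SCHEMA queries (URL and property name as used in the tests above; class name illustrative):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.util.Properties;

    class LegacyMetadataConnect {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("server.metadata.disabled", "true");
        Connection conn = DriverManager.getConnection("jdbc:drill:zk=local", props);
        DatabaseMetaData md = conn.getMetaData();
        // Served by a generated INFORMATION_SCHEMA query instead of a
        // GetTablesResp RPC:
        md.getTables(null, null, "%", null).close();
        conn.close();
      }
    }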

[03/27] drill git commit: DRILL-4994: Add back JDBC prepared statement for older servers

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
index 96b5669..847b726 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserClient.java
@@ -17,14 +17,6 @@
  */
 package org.apache.drill.exec.rpc.user;
 
-import com.google.common.base.Strings;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.AbstractCheckedFuture;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import io.netty.buffer.ByteBuf;
-
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
@@ -32,10 +24,12 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executor;
 
-import io.netty.channel.socket.SocketChannel;
+import javax.security.sasl.SaslClient;
+import javax.security.sasl.SaslException;
+
 import org.apache.drill.common.KerberosUtil;
-import org.apache.drill.common.config.DrillProperties;
 import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.config.DrillProperties;
 import org.apache.drill.exec.memory.BufferAllocator;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
 import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
@@ -60,29 +54,35 @@ import org.apache.drill.exec.proto.UserProtos.UserToBitHandshake;
 import org.apache.drill.exec.rpc.AbstractClientConnection;
 import org.apache.drill.exec.rpc.Acks;
 import org.apache.drill.exec.rpc.BasicClient;
-import org.apache.drill.exec.rpc.NonTransientRpcException;
-import org.apache.drill.exec.rpc.security.AuthenticationOutcomeListener;
 import org.apache.drill.exec.rpc.DrillRpcFuture;
+import org.apache.drill.exec.rpc.NonTransientRpcException;
 import org.apache.drill.exec.rpc.OutOfMemoryHandler;
 import org.apache.drill.exec.rpc.ProtobufLengthDecoder;
 import org.apache.drill.exec.rpc.Response;
 import org.apache.drill.exec.rpc.ResponseSender;
 import org.apache.drill.exec.rpc.RpcConnectionHandler;
 import org.apache.drill.exec.rpc.RpcException;
-
-import com.google.protobuf.MessageLite;
 import org.apache.drill.exec.rpc.RpcOutcomeListener;
 import org.apache.drill.exec.rpc.security.AuthStringUtil;
+import org.apache.drill.exec.rpc.security.AuthenticationOutcomeListener;
 import org.apache.drill.exec.rpc.security.AuthenticatorFactory;
-import org.apache.drill.exec.rpc.security.plain.PlainFactory;
 import org.apache.drill.exec.rpc.security.ClientAuthenticatorProvider;
+import org.apache.drill.exec.rpc.security.plain.PlainFactory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
 
-import javax.security.sasl.SaslClient;
-import javax.security.sasl.SaslException;
+import com.google.common.base.Strings;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.AbstractCheckedFuture;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.protobuf.MessageLite;
 
+import io.netty.buffer.ByteBuf;
 import io.netty.channel.EventLoopGroup;
-import org.slf4j.Logger;
+import io.netty.channel.socket.SocketChannel;
 
 public class UserClient extends BasicClient<RpcType, UserClient.UserToBitConnection,
     UserToBitHandshake, BitToUserHandshake> {
@@ -91,9 +91,10 @@ public class UserClient extends BasicClient<RpcType, UserClient.UserToBitConnect
   private final BufferAllocator allocator;
   private final QueryResultHandler queryResultHandler = new QueryResultHandler();
   private final String clientName;
+  private final boolean supportComplexTypes;
 
   private RpcEndpointInfos serverInfos = null;
-  private boolean supportComplexTypes = true;
+  private Set<RpcType> supportedMethods = null;
 
   // these are used for authentication
   private volatile List<String> serverAuthMechanisms = null;
@@ -117,6 +118,10 @@ public class UserClient extends BasicClient<RpcType, UserClient.UserToBitConnect
     return serverInfos;
   }
 
+  public Set<RpcType> getSupportedMethods() {
+    return supportedMethods;
+  }
+
   public void submitQuery(UserResultsListener resultsListener, RunQuery query) {
     send(queryResultHandler.getWrappedListener(resultsListener), RpcType.RUN_QUERY, query, QueryId.class);
   }
@@ -346,6 +351,8 @@ public class UserClient extends BasicClient<RpcType, UserClient.UserToBitConnect
     if (inbound.hasServerInfos()) {
       serverInfos = inbound.getServerInfos();
     }
+    supportedMethods = Sets.immutableEnumSet(inbound.getSupportedMethodsList());
+
     switch (inbound.getStatus()) {
     case SUCCESS:
       break;
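
Note that the supported-methods set is captured unconditionally, before the status switch, so it is available even while the handshake is still being validated; an older server that omits the field simply yields an empty set. A minimal sketch of a capability check built on the new accessor (ServerCapabilities and isSupported are illustrative names, not part of this patch):

import java.util.Set;

import org.apache.drill.exec.proto.UserProtos.RpcType;

final class ServerCapabilities {
  private final Set<RpcType> supportedMethods;

  ServerCapabilities(Set<RpcType> supportedMethods) {
    // Typically UserClient.getSupportedMethods(); empty when the server
    // did not send supported_methods in its handshake.
    this.supportedMethods = supportedMethods;
  }

  boolean isSupported(RpcType method) {
    return supportedMethods.contains(method);
  }
}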

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
index 645ded5..ecf15dd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcConfig.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.rpc.user;
 
+import java.util.Set;
 import java.util.concurrent.Executor;
 
 import org.apache.drill.common.config.DrillConfig;
@@ -29,8 +30,8 @@ import org.apache.drill.exec.proto.UserBitShared.SaslMessage;
 import org.apache.drill.exec.proto.UserProtos.BitToUserHandshake;
 import org.apache.drill.exec.proto.UserProtos.CreatePreparedStatementReq;
 import org.apache.drill.exec.proto.UserProtos.CreatePreparedStatementResp;
-import org.apache.drill.exec.proto.UserProtos.GetCatalogsResp;
 import org.apache.drill.exec.proto.UserProtos.GetCatalogsReq;
+import org.apache.drill.exec.proto.UserProtos.GetCatalogsResp;
 import org.apache.drill.exec.proto.UserProtos.GetColumnsReq;
 import org.apache.drill.exec.proto.UserProtos.GetColumnsResp;
 import org.apache.drill.exec.proto.UserProtos.GetQueryPlanFragments;
@@ -44,6 +45,9 @@ import org.apache.drill.exec.proto.UserProtos.RunQuery;
 import org.apache.drill.exec.proto.UserProtos.UserToBitHandshake;
 import org.apache.drill.exec.rpc.RpcConfig;
 
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
 public class UserRpcConfig {
 //  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UserRpcConfig.class);
 
@@ -75,4 +79,16 @@ public class UserRpcConfig {
   // prevent instantiation
   private UserRpcConfig() {
   }
+
+  /**
+   * Contains the set of user-to-bit RPC methods supported by the server
+   */
+  public static final Set<RpcType> SUPPORTED_SERVER_METHODS = Sets.immutableEnumSet(
+      ImmutableSet
+        .<RpcType> builder()
+        .add(RpcType.RUN_QUERY, RpcType.CANCEL_QUERY, RpcType.GET_QUERY_PLAN_FRAGMENTS, RpcType.RESUME_PAUSED_QUERY,
+          RpcType.GET_CATALOGS, RpcType.GET_SCHEMAS, RpcType.GET_TABLES, RpcType.GET_COLUMNS,
+          RpcType.CREATE_PREPARED_STATEMENT)
+        .build()
+        );
 }
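
Sets.immutableEnumSet wraps the methods in an immutable EnumSet, so the contains() checks performed per request are constant-time bit tests rather than hash lookups. A self-contained illustration of the same Guava idiom (the Rpc enum and its values here are placeholders):

import java.util.Set;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

public class EnumSetIdiom {
  enum Rpc { RUN_QUERY, GET_CATALOGS, CREATE_PREPARED_STATEMENT }

  public static void main(String[] args) {
    Set<Rpc> supported = Sets.immutableEnumSet(
        ImmutableSet.<Rpc>builder()
            .add(Rpc.RUN_QUERY, Rpc.GET_CATALOGS)
            .build());

    // Backed by an EnumSet: membership is a single bit test.
    System.out.println(supported.contains(Rpc.CREATE_PREPARED_STATEMENT)); // false
  }
}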

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
index e7e9ffd..c513d11 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserRpcUtils.java
@@ -20,6 +20,7 @@ package org.apache.drill.exec.rpc.user;
 import java.lang.management.ManagementFactory;
 import java.lang.management.RuntimeMXBean;
 
+import org.apache.drill.common.Version;
 import org.apache.drill.common.util.DrillVersionInfo;
 import org.apache.drill.exec.proto.UserProtos.RpcEndpointInfos;
 
@@ -52,8 +53,23 @@ public final class UserRpcUtils {
         .setMajorVersion(DrillVersionInfo.getMajorVersion())
         .setMinorVersion(DrillVersionInfo.getMinorVersion())
         .setPatchVersion(DrillVersionInfo.getPatchVersion())
+        .setBuildNumber(DrillVersionInfo.getBuildNumber())
+        .setVersionQualifier(DrillVersionInfo.getQualifier())
         .build();
 
     return infos;
   }
+
+  /**
+   * Get the version from a {@code RpcEndpointInfos} instance
+   */
+  public static Version getVersion(RpcEndpointInfos infos) {
+    return new Version(
+        infos.getVersion(),
+        infos.getMajorVersion(),
+        infos.getMinorVersion(),
+        infos.getPatchVersion(),
+        infos.getBuildNumber(),
+        infos.getVersionQualifier());
+  }
 }
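
Having a structured Version, rather than the raw version string, lets callers compare servers numerically. A sketch of a simple gate, assuming only the accessors this patch already exercises (getMajorVersion()/getMinorVersion()); atLeast is an illustrative helper, not part of the change:

import org.apache.drill.common.Version;

final class VersionGate {
  /** True if the server reports at least {@code major.minor}. */
  static boolean atLeast(Version server, int major, int minor) {
    if (server == null) {
      return false; // servers predating RpcEndpointInfos report nothing
    }
    if (server.getMajorVersion() != major) {
      return server.getMajorVersion() > major;
    }
    return server.getMinorVersion() >= minor;
  }
}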

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
index 6854a3e..e917b3e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
@@ -275,7 +275,8 @@ public class UserServer extends BasicServer<RpcType, BitToUserConnection> {
 
         BitToUserHandshake.Builder respBuilder = BitToUserHandshake.newBuilder()
             .setRpcVersion(UserRpcConfig.RPC_VERSION)
-            .setServerInfos(UserRpcUtils.getRpcEndpointInfos(SERVER_NAME));
+            .setServerInfos(UserRpcUtils.getRpcEndpointInfos(SERVER_NAME))
+            .addAllSupportedMethods(UserRpcConfig.SUPPORTED_SERVER_METHODS);
 
         try {
           if (inbound.getRpcVersion() != UserRpcConfig.RPC_VERSION) {

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
index ca20c01..55cb1ff 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillConnectionConfig.java
@@ -66,4 +66,8 @@ public class DrillConnectionConfig extends ConnectionConfigImpl {
     return TimeZone.getDefault();
   }
 
+  public boolean disableServerPreparedStatement() {
+    return Boolean.valueOf(props.getProperty("preparedstatement.server.disabled"));
+  }
+
 }
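
Boolean.valueOf(null) is false, so server-side prepared statements remain enabled unless the property is explicitly set to "true". A minimal opt-out sketch, using the property name defined above and the embedded-mode URL from this module's tests:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class DisableServerPrepare {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // Any value other than "true" (or no value at all) keeps server-side prepare on.
    props.setProperty("preparedstatement.server.disabled", "true");

    try (Connection conn = DriverManager.getConnection("jdbc:drill:zk=local", props)) {
      // PreparedStatements created on this connection are prepared client-side.
      System.out.println("connected: " + !conn.isClosed());
    }
  }
}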

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
index ed279a3..9b9a4c8 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
@@ -43,6 +43,7 @@ import org.apache.drill.exec.exception.SchemaChangeException;
 import org.apache.drill.exec.proto.UserBitShared.QueryId;
 import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
 import org.apache.drill.exec.proto.UserBitShared.QueryType;
+import org.apache.drill.exec.proto.UserProtos.PreparedStatement;
 import org.apache.drill.exec.proto.helper.QueryIdHelper;
 import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.RecordBatchLoader;
@@ -527,10 +528,18 @@ class DrillCursor implements Cursor {
         : "currentBatchHolder.getRecordCount() not 0 (is "
           + currentBatchHolder.getRecordCount() + " in loadInitialSchema()";
 
+    final PreparedStatement preparedStatement;
     if (statement instanceof DrillPreparedStatementImpl) {
       DrillPreparedStatementImpl drillPreparedStatement = (DrillPreparedStatementImpl) statement;
-      connection.getClient().executePreparedStatement(drillPreparedStatement.getPreparedStatementHandle().getServerHandle(), resultsListener);
+      preparedStatement = drillPreparedStatement.getPreparedStatementHandle();
     } else {
+      preparedStatement = null;
+    }
+
+    if (preparedStatement != null) {
+      connection.getClient().executePreparedStatement(preparedStatement.getServerHandle(), resultsListener);
+    }
+    else {
       connection.getClient().runQuery(QueryType.SQL, signature.sql, resultsListener);
     }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
index 43c6c21..1c350f3 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillDatabaseMetaDataImpl.java
@@ -26,7 +26,7 @@ import java.sql.SQLFeatureNotSupportedException;
 
 import org.apache.calcite.avatica.AvaticaConnection;
 import org.apache.calcite.avatica.AvaticaDatabaseMetaData;
-import org.apache.drill.exec.proto.UserProtos.RpcEndpointInfos;
+import org.apache.drill.common.Version;
 import org.apache.drill.jdbc.AlreadyClosedSqlException;
 import org.apache.drill.jdbc.DrillDatabaseMetaData;
 
@@ -55,9 +55,14 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
     }
   }
 
-  private RpcEndpointInfos getServerInfos() throws SQLException {
+  private String getServerName() throws SQLException {
     DrillConnectionImpl connection = (DrillConnectionImpl) getConnection();
-    return connection.getClient().getServerInfos();
+    return connection.getClient().getServerName();
+  }
+
+  private Version getServerVersion() throws SQLException {
+    DrillConnectionImpl connection = (DrillConnectionImpl) getConnection();
+    return connection.getClient().getServerVersion();
   }
 
   // Note:  Dynamic proxies could be used to reduce the quantity (450?) of
@@ -130,21 +135,21 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public String getDatabaseProductName() throws SQLException {
     throwIfClosed();
-    RpcEndpointInfos infos = getServerInfos();
-    if (infos == null) {
+    String name = getServerName();
+    if (name == null) {
       return super.getDatabaseProductName();
     }
-    return infos.getName();
+    return name;
   }
 
   @Override
   public String getDatabaseProductVersion() throws SQLException {
     throwIfClosed();
-    RpcEndpointInfos infos = getServerInfos();
-    if (infos == null) {
+    Version version = getServerVersion();
+    if (version == null) {
       return super.getDatabaseProductVersion();
     }
-    return infos.getVersion();
+    return version.getVersion();
   }
 
   @Override
@@ -1184,21 +1189,21 @@ class DrillDatabaseMetaDataImpl extends AvaticaDatabaseMetaData
   @Override
   public int getDatabaseMajorVersion() throws SQLException {
     throwIfClosed();
-    RpcEndpointInfos infos = getServerInfos();
-    if (infos == null) {
+    Version version = getServerVersion();
+    if (version == null) {
       return super.getDatabaseMajorVersion();
     }
-    return infos.getMajorVersion();
+    return version.getMajorVersion();
   }
 
   @Override
   public int getDatabaseMinorVersion() throws SQLException {
     throwIfClosed();
-    RpcEndpointInfos infos = getServerInfos();
-    if (infos == null) {
+    Version version = getServerVersion();
+    if (version == null) {
       return super.getDatabaseMinorVersion();
     }
-    return infos.getMinorVersion();
+    return version.getMinorVersion();
   }
 
   @Override
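
With these overrides, the standard JDBC metadata calls surface the parsed version and fall back to the Avatica defaults when the server reported no endpoint infos. A usage sketch against a live connection (embedded-mode URL as used in this module's tests):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;

public class ServerVersionProbe {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:drill:zk=local")) {
      DatabaseMetaData md = conn.getMetaData();
      // Served from the handshake's RpcEndpointInfos via getServerName()/getServerVersion().
      System.out.println(md.getDatabaseProductName());
      System.out.println(md.getDatabaseProductVersion());
      System.out.println(md.getDatabaseMajorVersion() + "." + md.getDatabaseMinorVersion());
    }
  }
}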

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
index 670a5f2..28a4372 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillJdbc41Factory.java
@@ -34,6 +34,8 @@ import org.apache.calcite.avatica.AvaticaStatement;
 import org.apache.calcite.avatica.Helper;
 import org.apache.calcite.avatica.Meta;
 import org.apache.calcite.avatica.Meta.StatementHandle;
+import org.apache.drill.exec.client.DrillClient;
+import org.apache.drill.exec.client.ServerMethod;
 import org.apache.drill.exec.proto.UserProtos.CreatePreparedStatementResp;
 import org.apache.drill.exec.proto.UserProtos.RequestStatus;
 import org.apache.drill.exec.rpc.DrillRpcFuture;
@@ -97,10 +99,27 @@ public class DrillJdbc41Factory extends DrillFactory {
                                                        int resultSetConcurrency,
                                                        int resultSetHoldability)
       throws SQLException {
-    String sql = signature.sql;
     DrillConnectionImpl drillConnection = (DrillConnectionImpl) connection;
+    DrillClient client = drillConnection.getClient();
+    if (drillConnection.getConfig().disableServerPreparedStatement() || !client.getSupportedMethods().contains(ServerMethod.PREPARED_STATEMENT)) {
+      // fallback to client side prepared statement
+      return new DrillJdbc41PreparedStatement(drillConnection, h, signature, null, resultSetType, resultSetConcurrency, resultSetHoldability);
+    }
+    return newServerPreparedStatement(drillConnection, h, signature, resultSetType,
+        resultSetConcurrency, resultSetHoldability);
+  }
+
+  private DrillJdbc41PreparedStatement newServerPreparedStatement(DrillConnectionImpl connection,
+                                                                  StatementHandle h,
+                                                                  Meta.Signature signature,
+                                                                  int resultSetType,
+                                                                  int resultSetConcurrency,
+                                                                  int resultSetHoldability
+      ) throws SQLException {
+    String sql = signature.sql;
+
     try {
-      DrillRpcFuture<CreatePreparedStatementResp> respFuture = drillConnection.getClient().createPreparedStatement(signature.sql);
+      DrillRpcFuture<CreatePreparedStatementResp> respFuture = connection.getClient().createPreparedStatement(signature.sql);
 
       CreatePreparedStatementResp resp;
       try {
@@ -133,7 +152,7 @@ public class DrillJdbc41Factory extends DrillFactory {
             "Failed to create prepared statement. Unknown status: %s, Error: %s", status, errMsgFromServer));
       }
 
-      return new DrillJdbc41PreparedStatement((DrillConnectionImpl) connection,
+      return new DrillJdbc41PreparedStatement(connection,
           h,
           signature,
           resp.getPreparedStatement(),
@@ -147,7 +166,6 @@ public class DrillJdbc41Factory extends DrillFactory {
     } catch (Exception e) {
       throw Helper.INSTANCE.createException("Error while preparing statement [" + sql + "]", e);
     }
-
   }
 
   @Override
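
The branch added at the top of the factory method reduces to a single predicate: prepare on the server only when the feature is both enabled by configuration and advertised by the server. A distilled restatement of that decision (shouldPrepareOnServer is an illustrative name, not in the patch):

import org.apache.drill.exec.client.DrillClient;
import org.apache.drill.exec.client.ServerMethod;

final class PrepareRouting {
  /** Illustrative restatement of the branch in DrillJdbc41Factory above. */
  static boolean shouldPrepareOnServer(boolean disabledByConfig, DrillClient client) {
    return !disabledByConfig
        && client.getSupportedMethods().contains(ServerMethod.PREPARED_STATEMENT);
  }
}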

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillPreparedStatementImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillPreparedStatementImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillPreparedStatementImpl.java
index 2894f61..f1ba4c1 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillPreparedStatementImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillPreparedStatementImpl.java
@@ -58,9 +58,13 @@ abstract class DrillPreparedStatementImpl extends AvaticaPreparedStatement
           resultSetType, resultSetConcurrency, resultSetHoldability);
     connection.openStatementsRegistry.addStatement(this);
     this.preparedStatementHandle = preparedStatementHandle;
-    ((DrillColumnMetaDataList) signature.columns).updateColumnMetaData(preparedStatementHandle.getColumnsList());
+    if (preparedStatementHandle != null) {
+      ((DrillColumnMetaDataList) signature.columns).updateColumnMetaData(preparedStatementHandle.getColumnsList());
+    }
   }
 
+
+
   /**
    * Throws AlreadyClosedSqlException <i>iff</i> this PreparedStatement is closed.
    *

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java
new file mode 100644
index 0000000..46d675f
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/LegacyPreparedStatementTest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.sql.Statement;
+import java.util.Properties;
+
+import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.hamcrest.Matcher;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test that prepared statements work, to some extent, even when not supported on the server.
+ */
+public class LegacyPreparedStatementTest extends JdbcTestBase {
+  /** Fuzzy matcher for parameters-not-supported message assertions.  (Based on
+   *  current "Prepared-statement dynamic parameters are not supported.") */
+  private static final Matcher<String> PARAMETERS_NOT_SUPPORTED_MSG_MATCHER =
+      allOf( containsString( "arameter" ),   // allows "Parameter"
+             containsString( "not" ),        // (could have false matches)
+             containsString( "support" ) );  // allows "supported"
+
+  private static Connection connection;
+
+  @BeforeClass
+  public static void setUpConnection() throws SQLException {
+    Driver.load();
+    Properties properties = new Properties();
+    properties.setProperty("preparedstatement.server.disabled", "true");
+
+    connection = DriverManager.getConnection( "jdbc:drill:zk=local", properties);
+  }
+
+  @AfterClass
+  public static void tearDownConnection() throws SQLException {
+    if (connection != null) {
+      try (Statement stmt = connection.createStatement()) {
+        stmt.execute(String.format("alter session set `%s` = false", PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY));
+      }
+    }
+    connection.close();
+  }
+
+  //////////
+  // Basic querying-works test:
+
+  /** Tests that basic executeQuery() (with query statement) works. */
+  @Test
+  public void testExecuteQueryBasicCaseWorks() throws SQLException {
+    try (PreparedStatement stmt = connection.prepareStatement( "VALUES 11" )) {
+      try(ResultSet rs = stmt.executeQuery()) {
+        assertThat("Unexpected column count",
+            rs.getMetaData().getColumnCount(), equalTo(1)
+        );
+        assertTrue("No expected first row", rs.next());
+        assertThat(rs.getInt(1), equalTo(11));
+        assertFalse("Unexpected second row", rs.next());
+      }
+    }
+  }
+
+  //////////
+  // Parameters-not-implemented tests:
+
+  /** Tests that "not supported" has priority over a possible "no parameters"
+   *  check. */
+  @Test( expected = SQLFeatureNotSupportedException.class )
+  public void testParamSettingWhenNoParametersIndexSaysUnsupported() throws SQLException {
+    try(PreparedStatement prepStmt = connection.prepareStatement( "VALUES 1" )) {
+      try {
+        prepStmt.setBytes(4, null);
+      } catch (final SQLFeatureNotSupportedException e) {
+        assertThat(
+            "Check whether params.-unsupported wording changed or checks changed.",
+            e.toString(), PARAMETERS_NOT_SUPPORTED_MSG_MATCHER
+        );
+        throw e;
+      }
+    }
+  }
+
+  /** Tests that "not supported" has priority over a possible "type not supported"
+   *  check. */
+  @Test( expected = SQLFeatureNotSupportedException.class )
+  public void testParamSettingWhenUnsupportedTypeSaysUnsupported() throws SQLException {
+    try(PreparedStatement prepStmt = connection.prepareStatement( "VALUES 1" )) {
+      try {
+        prepStmt.setClob(2, (Clob) null);
+      } catch (final SQLFeatureNotSupportedException e) {
+        assertThat(
+            "Check whether params.-unsupported wording changed or checks changed.",
+            e.toString(), PARAMETERS_NOT_SUPPORTED_MSG_MATCHER
+        );
+        throw e;
+      }
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
index 1adf7a7..dd8c684 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
@@ -278,6 +278,10 @@ public final class SchemaUserProtos
                     output.writeUInt32(5, message.getPatchVersion(), false);
                 if(message.hasApplication())
                     output.writeString(6, message.getApplication(), false);
+                if(message.hasBuildNumber())
+                    output.writeUInt32(7, message.getBuildNumber(), false);
+                if(message.hasVersionQualifier())
+                    output.writeString(8, message.getVersionQualifier(), false);
             }
             public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.RpcEndpointInfos message)
             {
@@ -335,6 +339,12 @@ public final class SchemaUserProtos
                         case 6:
                             builder.setApplication(input.readString());
                             break;
+                        case 7:
+                            builder.setBuildNumber(input.readUInt32());
+                            break;
+                        case 8:
+                            builder.setVersionQualifier(input.readString());
+                            break;
                         default:
                             input.handleUnknownField(number, this);
                     }
@@ -381,6 +391,8 @@ public final class SchemaUserProtos
                 case 4: return "minorVersion";
                 case 5: return "patchVersion";
                 case 6: return "application";
+                case 7: return "buildNumber";
+                case 8: return "versionQualifier";
                 default: return null;
             }
         }
@@ -398,6 +410,8 @@ public final class SchemaUserProtos
             fieldMap.put("minorVersion", 4);
             fieldMap.put("patchVersion", 5);
             fieldMap.put("application", 6);
+            fieldMap.put("buildNumber", 7);
+            fieldMap.put("versionQualifier", 8);
         }
     }
 
@@ -981,6 +995,8 @@ public final class SchemaUserProtos
 
                 for(String authenticationMechanisms : message.getAuthenticationMechanismsList())
                     output.writeString(7, authenticationMechanisms, true);
+                for(org.apache.drill.exec.proto.UserProtos.RpcType supportedMethods : message.getSupportedMethodsList())
+                    output.writeEnum(8, supportedMethods.getNumber(), true);
             }
             public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.BitToUserHandshake message)
             {
@@ -1039,6 +1055,9 @@ public final class SchemaUserProtos
                         case 7:
                             builder.addAuthenticationMechanisms(input.readString());
                             break;
+                        case 8:
+                            builder.addSupportedMethods(org.apache.drill.exec.proto.UserProtos.RpcType.valueOf(input.readEnum()));
+                            break;
                         default:
                             input.handleUnknownField(number, this);
                     }
@@ -1085,6 +1104,7 @@ public final class SchemaUserProtos
                 case 5: return "errorMessage";
                 case 6: return "serverInfos";
                 case 7: return "authenticationMechanisms";
+                case 8: return "supportedMethods";
                 default: return null;
             }
         }
@@ -1102,6 +1122,7 @@ public final class SchemaUserProtos
             fieldMap.put("errorMessage", 5);
             fieldMap.put("serverInfos", 6);
             fieldMap.put("authenticationMechanisms", 7);
+            fieldMap.put("supportedMethods", 8);
         }
     }
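
Both serialization paths (the protostuff schema above and the protobuf-generated classes below) carry the two new RpcEndpointInfos fields under tags 7 and 8, so old readers simply skip them as unknown fields. A round-trip sketch using only the generated builder API; the values shown are examples:

import org.apache.drill.exec.proto.UserProtos.RpcEndpointInfos;

public class EndpointInfosRoundTrip {
  public static void main(String[] args) throws Exception {
    RpcEndpointInfos infos = RpcEndpointInfos.newBuilder()
        .setName("Apache Drill Server")
        .setVersion("1.10.0-SNAPSHOT")
        .setMajorVersion(1)
        .setMinorVersion(10)
        .setPatchVersion(0)
        .setBuildNumber(32)               // new field, tag 7
        .setVersionQualifier("SNAPSHOT")  // new field, tag 8
        .build();

    // An old reader skips tags 7 and 8 as unknown fields; a new one recovers them.
    RpcEndpointInfos decoded = RpcEndpointInfos.parseFrom(infos.toByteArray());
    System.out.println(decoded.getBuildNumber() + "-" + decoded.getVersionQualifier());
  }
}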
 

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java b/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
index e82d22b..daa3903 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/UserProtos.java
@@ -2597,6 +2597,51 @@ public final class UserProtos {
      */
     com.google.protobuf.ByteString
         getApplicationBytes();
+
+    // optional uint32 buildNumber = 7;
+    /**
+     * <code>optional uint32 buildNumber = 7;</code>
+     *
+     * <pre>
+     * example: 32
+     * </pre>
+     */
+    boolean hasBuildNumber();
+    /**
+     * <code>optional uint32 buildNumber = 7;</code>
+     *
+     * <pre>
+     * example: 32
+     * </pre>
+     */
+    int getBuildNumber();
+
+    // optional string versionQualifier = 8;
+    /**
+     * <code>optional string versionQualifier = 8;</code>
+     *
+     * <pre>
+     * example: SNAPSHOT
+     * </pre>
+     */
+    boolean hasVersionQualifier();
+    /**
+     * <code>optional string versionQualifier = 8;</code>
+     *
+     * <pre>
+     * example: SNAPSHOT
+     * </pre>
+     */
+    java.lang.String getVersionQualifier();
+    /**
+     * <code>optional string versionQualifier = 8;</code>
+     *
+     * <pre>
+     * example: SNAPSHOT
+     * </pre>
+     */
+    com.google.protobuf.ByteString
+        getVersionQualifierBytes();
   }
   /**
    * Protobuf type {@code exec.user.RpcEndpointInfos}
@@ -2679,6 +2724,16 @@ public final class UserProtos {
               application_ = input.readBytes();
               break;
             }
+            case 56: {
+              bitField0_ |= 0x00000040;
+              buildNumber_ = input.readUInt32();
+              break;
+            }
+            case 66: {
+              bitField0_ |= 0x00000080;
+              versionQualifier_ = input.readBytes();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2956,6 +3011,85 @@ public final class UserProtos {
       }
     }
 
+    // optional uint32 buildNumber = 7;
+    public static final int BUILDNUMBER_FIELD_NUMBER = 7;
+    private int buildNumber_;
+    /**
+     * <code>optional uint32 buildNumber = 7;</code>
+     *
+     * <pre>
+     * example: 32
+     * </pre>
+     */
+    public boolean hasBuildNumber() {
+      return ((bitField0_ & 0x00000040) == 0x00000040);
+    }
+    /**
+     * <code>optional uint32 buildNumber = 7;</code>
+     *
+     * <pre>
+     * example: 32
+     * </pre>
+     */
+    public int getBuildNumber() {
+      return buildNumber_;
+    }
+
+    // optional string versionQualifier = 8;
+    public static final int VERSIONQUALIFIER_FIELD_NUMBER = 8;
+    private java.lang.Object versionQualifier_;
+    /**
+     * <code>optional string versionQualifier = 8;</code>
+     *
+     * <pre>
+     * example: SNAPSHOT
+     * </pre>
+     */
+    public boolean hasVersionQualifier() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    /**
+     * <code>optional string versionQualifier = 8;</code>
+     *
+     * <pre>
+     * example: SNAPSHOT
+     * </pre>
+     */
+    public java.lang.String getVersionQualifier() {
+      java.lang.Object ref = versionQualifier_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          versionQualifier_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string versionQualifier = 8;</code>
+     *
+     * <pre>
+     * example: SNAPSHOT
+     * </pre>
+     */
+    public com.google.protobuf.ByteString
+        getVersionQualifierBytes() {
+      java.lang.Object ref = versionQualifier_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        versionQualifier_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
     private void initFields() {
       name_ = "";
       version_ = "";
@@ -2963,6 +3097,8 @@ public final class UserProtos {
       minorVersion_ = 0;
       patchVersion_ = 0;
       application_ = "";
+      buildNumber_ = 0;
+      versionQualifier_ = "";
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2994,6 +3130,12 @@ public final class UserProtos {
       if (((bitField0_ & 0x00000020) == 0x00000020)) {
         output.writeBytes(6, getApplicationBytes());
       }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        output.writeUInt32(7, buildNumber_);
+      }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeBytes(8, getVersionQualifierBytes());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -3027,6 +3169,14 @@ public final class UserProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBytesSize(6, getApplicationBytes());
       }
+      if (((bitField0_ & 0x00000040) == 0x00000040)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(7, buildNumber_);
+      }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(8, getVersionQualifierBytes());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -3155,6 +3305,10 @@ public final class UserProtos {
         bitField0_ = (bitField0_ & ~0x00000010);
         application_ = "";
         bitField0_ = (bitField0_ & ~0x00000020);
+        buildNumber_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000040);
+        versionQualifier_ = "";
+        bitField0_ = (bitField0_ & ~0x00000080);
         return this;
       }
 
@@ -3207,6 +3361,14 @@ public final class UserProtos {
           to_bitField0_ |= 0x00000020;
         }
         result.application_ = application_;
+        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+          to_bitField0_ |= 0x00000040;
+        }
+        result.buildNumber_ = buildNumber_;
+        if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        result.versionQualifier_ = versionQualifier_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -3247,6 +3409,14 @@ public final class UserProtos {
           application_ = other.application_;
           onChanged();
         }
+        if (other.hasBuildNumber()) {
+          setBuildNumber(other.getBuildNumber());
+        }
+        if (other.hasVersionQualifier()) {
+          bitField0_ |= 0x00000080;
+          versionQualifier_ = other.versionQualifier_;
+          onChanged();
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -3715,6 +3885,153 @@ public final class UserProtos {
         return this;
       }
 
+      // optional uint32 buildNumber = 7;
+      private int buildNumber_ ;
+      /**
+       * <code>optional uint32 buildNumber = 7;</code>
+       *
+       * <pre>
+       * example: 32
+       * </pre>
+       */
+      public boolean hasBuildNumber() {
+        return ((bitField0_ & 0x00000040) == 0x00000040);
+      }
+      /**
+       * <code>optional uint32 buildNumber = 7;</code>
+       *
+       * <pre>
+       * example: 32
+       * </pre>
+       */
+      public int getBuildNumber() {
+        return buildNumber_;
+      }
+      /**
+       * <code>optional uint32 buildNumber = 7;</code>
+       *
+       * <pre>
+       * example: 32
+       * </pre>
+       */
+      public Builder setBuildNumber(int value) {
+        bitField0_ |= 0x00000040;
+        buildNumber_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint32 buildNumber = 7;</code>
+       *
+       * <pre>
+       * example: 32
+       * </pre>
+       */
+      public Builder clearBuildNumber() {
+        bitField0_ = (bitField0_ & ~0x00000040);
+        buildNumber_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // optional string versionQualifier = 8;
+      private java.lang.Object versionQualifier_ = "";
+      /**
+       * <code>optional string versionQualifier = 8;</code>
+       *
+       * <pre>
+       * example: SNAPSHOT
+       * </pre>
+       */
+      public boolean hasVersionQualifier() {
+        return ((bitField0_ & 0x00000080) == 0x00000080);
+      }
+      /**
+       * <code>optional string versionQualifier = 8;</code>
+       *
+       * <pre>
+       * example: SNAPSHOT
+       * </pre>
+       */
+      public java.lang.String getVersionQualifier() {
+        java.lang.Object ref = versionQualifier_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          versionQualifier_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string versionQualifier = 8;</code>
+       *
+       * <pre>
+       * example: SNAPSHOT
+       * </pre>
+       */
+      public com.google.protobuf.ByteString
+          getVersionQualifierBytes() {
+        java.lang.Object ref = versionQualifier_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          versionQualifier_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string versionQualifier = 8;</code>
+       *
+       * <pre>
+       * example: SNAPSHOT
+       * </pre>
+       */
+      public Builder setVersionQualifier(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000080;
+        versionQualifier_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string versionQualifier = 8;</code>
+       *
+       * <pre>
+       * example: SNAPSHOT
+       * </pre>
+       */
+      public Builder clearVersionQualifier() {
+        bitField0_ = (bitField0_ & ~0x00000080);
+        versionQualifier_ = getDefaultInstance().getVersionQualifier();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string versionQualifier = 8;</code>
+       *
+       * <pre>
+       * example: SNAPSHOT
+       * </pre>
+       */
+      public Builder setVersionQualifierBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000080;
+        versionQualifier_ = value;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:exec.user.RpcEndpointInfos)
     }
 
@@ -7616,6 +7933,20 @@ public final class UserProtos {
      */
     com.google.protobuf.ByteString
         getAuthenticationMechanismsBytes(int index);
+
+    // repeated .exec.user.RpcType supported_methods = 8;
+    /**
+     * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+     */
+    java.util.List<org.apache.drill.exec.proto.UserProtos.RpcType> getSupportedMethodsList();
+    /**
+     * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+     */
+    int getSupportedMethodsCount();
+    /**
+     * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+     */
+    org.apache.drill.exec.proto.UserProtos.RpcType getSupportedMethods(int index);
   }
   /**
    * Protobuf type {@code exec.user.BitToUserHandshake}
@@ -7715,6 +8046,39 @@ public final class UserProtos {
               authenticationMechanisms_.add(input.readBytes());
               break;
             }
+            case 64: {
+              int rawValue = input.readEnum();
+              org.apache.drill.exec.proto.UserProtos.RpcType value = org.apache.drill.exec.proto.UserProtos.RpcType.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(8, rawValue);
+              } else {
+                if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+                  supportedMethods_ = new java.util.ArrayList<org.apache.drill.exec.proto.UserProtos.RpcType>();
+                  mutable_bitField0_ |= 0x00000040;
+                }
+                supportedMethods_.add(value);
+              }
+              break;
+            }
+            case 66: {
+              int length = input.readRawVarint32();
+              int oldLimit = input.pushLimit(length);
+              while(input.getBytesUntilLimit() > 0) {
+                int rawValue = input.readEnum();
+                org.apache.drill.exec.proto.UserProtos.RpcType value = org.apache.drill.exec.proto.UserProtos.RpcType.valueOf(rawValue);
+                if (value == null) {
+                  unknownFields.mergeVarintField(8, rawValue);
+                } else {
+                  if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+                    supportedMethods_ = new java.util.ArrayList<org.apache.drill.exec.proto.UserProtos.RpcType>();
+                    mutable_bitField0_ |= 0x00000040;
+                  }
+                  supportedMethods_.add(value);
+                }
+              }
+              input.popLimit(oldLimit);
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -7726,6 +8090,9 @@ public final class UserProtos {
         if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
           authenticationMechanisms_ = new com.google.protobuf.UnmodifiableLazyStringList(authenticationMechanisms_);
         }
+        if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+          supportedMethods_ = java.util.Collections.unmodifiableList(supportedMethods_);
+        }
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
       }
@@ -7928,6 +8295,28 @@ public final class UserProtos {
       return authenticationMechanisms_.getByteString(index);
     }
 
+    // repeated .exec.user.RpcType supported_methods = 8;
+    public static final int SUPPORTED_METHODS_FIELD_NUMBER = 8;
+    private java.util.List<org.apache.drill.exec.proto.UserProtos.RpcType> supportedMethods_;
+    /**
+     * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+     */
+    public java.util.List<org.apache.drill.exec.proto.UserProtos.RpcType> getSupportedMethodsList() {
+      return supportedMethods_;
+    }
+    /**
+     * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+     */
+    public int getSupportedMethodsCount() {
+      return supportedMethods_.size();
+    }
+    /**
+     * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+     */
+    public org.apache.drill.exec.proto.UserProtos.RpcType getSupportedMethods(int index) {
+      return supportedMethods_.get(index);
+    }
+
     private void initFields() {
       rpcVersion_ = 0;
       status_ = org.apache.drill.exec.proto.UserProtos.HandshakeStatus.SUCCESS;
@@ -7935,6 +8324,7 @@ public final class UserProtos {
       errorMessage_ = "";
       serverInfos_ = org.apache.drill.exec.proto.UserProtos.RpcEndpointInfos.getDefaultInstance();
       authenticationMechanisms_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+      supportedMethods_ = java.util.Collections.emptyList();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -7966,6 +8356,9 @@ public final class UserProtos {
       for (int i = 0; i < authenticationMechanisms_.size(); i++) {
         output.writeBytes(7, authenticationMechanisms_.getByteString(i));
       }
+      for (int i = 0; i < supportedMethods_.size(); i++) {
+        output.writeEnum(8, supportedMethods_.get(i).getNumber());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -8004,6 +8397,15 @@ public final class UserProtos {
         size += dataSize;
         size += 1 * getAuthenticationMechanismsList().size();
       }
+      {
+        int dataSize = 0;
+        for (int i = 0; i < supportedMethods_.size(); i++) {
+          dataSize += com.google.protobuf.CodedOutputStream
+            .computeEnumSizeNoTag(supportedMethods_.get(i).getNumber());
+        }
+        size += dataSize;
+        size += 1 * supportedMethods_.size();
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -8137,6 +8539,8 @@ public final class UserProtos {
         bitField0_ = (bitField0_ & ~0x00000010);
         authenticationMechanisms_ = com.google.protobuf.LazyStringArrayList.EMPTY;
         bitField0_ = (bitField0_ & ~0x00000020);
+        supportedMethods_ = java.util.Collections.emptyList();
+        bitField0_ = (bitField0_ & ~0x00000040);
         return this;
       }
 
@@ -8195,6 +8599,11 @@ public final class UserProtos {
           bitField0_ = (bitField0_ & ~0x00000020);
         }
         result.authenticationMechanisms_ = authenticationMechanisms_;
+        if (((bitField0_ & 0x00000040) == 0x00000040)) {
+          supportedMethods_ = java.util.Collections.unmodifiableList(supportedMethods_);
+          bitField0_ = (bitField0_ & ~0x00000040);
+        }
+        result.supportedMethods_ = supportedMethods_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -8240,6 +8649,16 @@ public final class UserProtos {
           }
           onChanged();
         }
+        if (!other.supportedMethods_.isEmpty()) {
+          if (supportedMethods_.isEmpty()) {
+            supportedMethods_ = other.supportedMethods_;
+            bitField0_ = (bitField0_ & ~0x00000040);
+          } else {
+            ensureSupportedMethodsIsMutable();
+            supportedMethods_.addAll(other.supportedMethods_);
+          }
+          onChanged();
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -8694,6 +9113,78 @@ public final class UserProtos {
         return this;
       }
 
+      // repeated .exec.user.RpcType supported_methods = 8;
+      private java.util.List<org.apache.drill.exec.proto.UserProtos.RpcType> supportedMethods_ =
+        java.util.Collections.emptyList();
+      private void ensureSupportedMethodsIsMutable() {
+        if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+          supportedMethods_ = new java.util.ArrayList<org.apache.drill.exec.proto.UserProtos.RpcType>(supportedMethods_);
+          bitField0_ |= 0x00000040;
+        }
+      }
+      /**
+       * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+       */
+      public java.util.List<org.apache.drill.exec.proto.UserProtos.RpcType> getSupportedMethodsList() {
+        return java.util.Collections.unmodifiableList(supportedMethods_);
+      }
+      /**
+       * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+       */
+      public int getSupportedMethodsCount() {
+        return supportedMethods_.size();
+      }
+      /**
+       * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+       */
+      public org.apache.drill.exec.proto.UserProtos.RpcType getSupportedMethods(int index) {
+        return supportedMethods_.get(index);
+      }
+      /**
+       * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+       */
+      public Builder setSupportedMethods(
+          int index, org.apache.drill.exec.proto.UserProtos.RpcType value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureSupportedMethodsIsMutable();
+        supportedMethods_.set(index, value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+       */
+      public Builder addSupportedMethods(org.apache.drill.exec.proto.UserProtos.RpcType value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        ensureSupportedMethodsIsMutable();
+        supportedMethods_.add(value);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+       */
+      public Builder addAllSupportedMethods(
+          java.lang.Iterable<? extends org.apache.drill.exec.proto.UserProtos.RpcType> values) {
+        ensureSupportedMethodsIsMutable();
+        super.addAll(values, supportedMethods_);
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>repeated .exec.user.RpcType supported_methods = 8;</code>
+       */
+      public Builder clearSupportedMethods() {
+        supportedMethods_ = java.util.Collections.emptyList();
+        bitField0_ = (bitField0_ & ~0x00000040);
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:exec.user.BitToUserHandshake)
     }
 
@@ -29578,136 +30069,138 @@ public final class UserProtos {
       "tControl.proto\032\025ExecutionProtos.proto\"&\n" +
       "\010Property\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"9\n" +
       "\016UserProperties\022\'\n\nproperties\030\001 \003(\0132\023.ex" +
-      "ec.user.Property\"\210\001\n\020RpcEndpointInfos\022\014\n" +
+      "ec.user.Property\"\267\001\n\020RpcEndpointInfos\022\014\n" +
       "\004name\030\001 \001(\t\022\017\n\007version\030\002 \001(\t\022\024\n\014majorVer" +
       "sion\030\003 \001(\r\022\024\n\014minorVersion\030\004 \001(\r\022\024\n\014patc" +
-      "hVersion\030\005 \001(\r\022\023\n\013application\030\006 \001(\t\"\375\002\n\022" +
-      "UserToBitHandshake\022.\n\007channel\030\001 \001(\0162\027.ex",
-      "ec.shared.RpcChannel:\004USER\022\031\n\021support_li" +
-      "stening\030\002 \001(\010\022\023\n\013rpc_version\030\003 \001(\005\0221\n\013cr" +
-      "edentials\030\004 \001(\0132\034.exec.shared.UserCreden" +
-      "tials\022-\n\nproperties\030\005 \001(\0132\031.exec.user.Us" +
-      "erProperties\022$\n\025support_complex_types\030\006 " +
-      "\001(\010:\005false\022\036\n\017support_timeout\030\007 \001(\010:\005fal" +
-      "se\0221\n\014client_infos\030\010 \001(\0132\033.exec.user.Rpc" +
-      "EndpointInfos\022,\n\014sasl_support\030\t \001(\0162\026.ex" +
-      "ec.user.SaslSupport\"S\n\016RequestResults\022&\n" +
-      "\010query_id\030\001 \001(\0132\024.exec.shared.QueryId\022\031\n",
-      "\021maximum_responses\030\002 \001(\005\"g\n\025GetQueryPlan" +
-      "Fragments\022\r\n\005query\030\001 \002(\t\022$\n\004type\030\002 \001(\0162\026" +
-      ".exec.shared.QueryType\022\031\n\nsplit_plan\030\003 \001" +
-      "(\010:\005false\"\316\001\n\022QueryPlanFragments\0223\n\006stat" +
-      "us\030\001 \002(\0162#.exec.shared.QueryResult.Query" +
-      "State\022&\n\010query_id\030\002 \001(\0132\024.exec.shared.Qu" +
-      "eryId\0221\n\tfragments\030\003 \003(\0132\036.exec.bit.cont" +
-      "rol.PlanFragment\022(\n\005error\030\004 \001(\0132\031.exec.s" +
-      "hared.DrillPBError\"\321\001\n\022BitToUserHandshak" +
-      "e\022\023\n\013rpc_version\030\002 \001(\005\022*\n\006status\030\003 \001(\0162\032",
-      ".exec.user.HandshakeStatus\022\017\n\007errorId\030\004 " +
-      "\001(\t\022\024\n\014errorMessage\030\005 \001(\t\0221\n\014server_info" +
-      "s\030\006 \001(\0132\033.exec.user.RpcEndpointInfos\022 \n\030" +
-      "authenticationMechanisms\030\007 \003(\t\"-\n\nLikeFi" +
-      "lter\022\017\n\007pattern\030\001 \001(\t\022\016\n\006escape\030\002 \001(\t\"D\n" +
-      "\016GetCatalogsReq\0222\n\023catalog_name_filter\030\001" +
-      " \001(\0132\025.exec.user.LikeFilter\"M\n\017CatalogMe" +
-      "tadata\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013descript" +
-      "ion\030\002 \001(\t\022\017\n\007connect\030\003 \001(\t\"\223\001\n\017GetCatalo" +
-      "gsResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Reque",
-      "stStatus\022,\n\010catalogs\030\002 \003(\0132\032.exec.user.C" +
-      "atalogMetadata\022(\n\005error\030\003 \001(\0132\031.exec.sha" +
-      "red.DrillPBError\"v\n\rGetSchemasReq\0222\n\023cat" +
-      "alog_name_filter\030\001 \001(\0132\025.exec.user.LikeF" +
-      "ilter\0221\n\022schema_name_filter\030\002 \001(\0132\025.exec" +
-      ".user.LikeFilter\"i\n\016SchemaMetadata\022\024\n\014ca" +
-      "talog_name\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\r\n" +
-      "\005owner\030\003 \001(\t\022\014\n\004type\030\004 \001(\t\022\017\n\007mutable\030\005 " +
-      "\001(\t\"\220\001\n\016GetSchemasResp\022(\n\006status\030\001 \001(\0162\030" +
-      ".exec.user.RequestStatus\022*\n\007schemas\030\002 \003(",
-      "\0132\031.exec.user.SchemaMetadata\022(\n\005error\030\003 " +
-      "\001(\0132\031.exec.shared.DrillPBError\"\302\001\n\014GetTa" +
-      "blesReq\0222\n\023catalog_name_filter\030\001 \001(\0132\025.e" +
+      "hVersion\030\005 \001(\r\022\023\n\013application\030\006 \001(\t\022\023\n\013b" +
+      "uildNumber\030\007 \001(\r\022\030\n\020versionQualifier\030\010 \001",
+      "(\t\"\375\002\n\022UserToBitHandshake\022.\n\007channel\030\001 \001" +
+      "(\0162\027.exec.shared.RpcChannel:\004USER\022\031\n\021sup" +
+      "port_listening\030\002 \001(\010\022\023\n\013rpc_version\030\003 \001(" +
+      "\005\0221\n\013credentials\030\004 \001(\0132\034.exec.shared.Use" +
+      "rCredentials\022-\n\nproperties\030\005 \001(\0132\031.exec." +
+      "user.UserProperties\022$\n\025support_complex_t" +
+      "ypes\030\006 \001(\010:\005false\022\036\n\017support_timeout\030\007 \001" +
+      "(\010:\005false\0221\n\014client_infos\030\010 \001(\0132\033.exec.u" +
+      "ser.RpcEndpointInfos\022,\n\014sasl_support\030\t \001" +
+      "(\0162\026.exec.user.SaslSupport\"S\n\016RequestRes",
+      "ults\022&\n\010query_id\030\001 \001(\0132\024.exec.shared.Que" +
+      "ryId\022\031\n\021maximum_responses\030\002 \001(\005\"g\n\025GetQu" +
+      "eryPlanFragments\022\r\n\005query\030\001 \002(\t\022$\n\004type\030" +
+      "\002 \001(\0162\026.exec.shared.QueryType\022\031\n\nsplit_p" +
+      "lan\030\003 \001(\010:\005false\"\316\001\n\022QueryPlanFragments\022" +
+      "3\n\006status\030\001 \002(\0162#.exec.shared.QueryResul" +
+      "t.QueryState\022&\n\010query_id\030\002 \001(\0132\024.exec.sh" +
+      "ared.QueryId\0221\n\tfragments\030\003 \003(\0132\036.exec.b" +
+      "it.control.PlanFragment\022(\n\005error\030\004 \001(\0132\031" +
+      ".exec.shared.DrillPBError\"\200\002\n\022BitToUserH",
+      "andshake\022\023\n\013rpc_version\030\002 \001(\005\022*\n\006status\030" +
+      "\003 \001(\0162\032.exec.user.HandshakeStatus\022\017\n\007err" +
+      "orId\030\004 \001(\t\022\024\n\014errorMessage\030\005 \001(\t\0221\n\014serv" +
+      "er_infos\030\006 \001(\0132\033.exec.user.RpcEndpointIn" +
+      "fos\022 \n\030authenticationMechanisms\030\007 \003(\t\022-\n" +
+      "\021supported_methods\030\010 \003(\0162\022.exec.user.Rpc" +
+      "Type\"-\n\nLikeFilter\022\017\n\007pattern\030\001 \001(\t\022\016\n\006e" +
+      "scape\030\002 \001(\t\"D\n\016GetCatalogsReq\0222\n\023catalog" +
+      "_name_filter\030\001 \001(\0132\025.exec.user.LikeFilte" +
+      "r\"M\n\017CatalogMetadata\022\024\n\014catalog_name\030\001 \001",
+      "(\t\022\023\n\013description\030\002 \001(\t\022\017\n\007connect\030\003 \001(\t" +
+      "\"\223\001\n\017GetCatalogsResp\022(\n\006status\030\001 \001(\0162\030.e" +
+      "xec.user.RequestStatus\022,\n\010catalogs\030\002 \003(\013" +
+      "2\032.exec.user.CatalogMetadata\022(\n\005error\030\003 " +
+      "\001(\0132\031.exec.shared.DrillPBError\"v\n\rGetSch" +
+      "emasReq\0222\n\023catalog_name_filter\030\001 \001(\0132\025.e" +
       "xec.user.LikeFilter\0221\n\022schema_name_filte" +
-      "r\030\002 \001(\0132\025.exec.user.LikeFilter\0220\n\021table_" +
-      "name_filter\030\003 \001(\0132\025.exec.user.LikeFilter" +
-      "\022\031\n\021table_type_filter\030\004 \003(\t\"\\\n\rTableMeta" +
-      "data\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema_nam" +
-      "e\030\002 \001(\t\022\022\n\ntable_name\030\003 \001(\t\022\014\n\004type\030\004 \001(" +
-      "\t\"\215\001\n\rGetTablesResp\022(\n\006status\030\001 \001(\0162\030.ex",
-      "ec.user.RequestStatus\022(\n\006tables\030\002 \003(\0132\030." +
-      "exec.user.TableMetadata\022(\n\005error\030\003 \001(\0132\031" +
-      ".exec.shared.DrillPBError\"\333\001\n\rGetColumns" +
-      "Req\0222\n\023catalog_name_filter\030\001 \001(\0132\025.exec." +
-      "user.LikeFilter\0221\n\022schema_name_filter\030\002 " +
-      "\001(\0132\025.exec.user.LikeFilter\0220\n\021table_name" +
-      "_filter\030\003 \001(\0132\025.exec.user.LikeFilter\0221\n\022" +
-      "column_name_filter\030\004 \001(\0132\025.exec.user.Lik" +
-      "eFilter\"\251\003\n\016ColumnMetadata\022\024\n\014catalog_na" +
-      "me\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntable_n",
-      "ame\030\003 \001(\t\022\023\n\013column_name\030\004 \001(\t\022\030\n\020ordina" +
-      "l_position\030\005 \001(\005\022\025\n\rdefault_value\030\006 \001(\t\022" +
-      "\023\n\013is_nullable\030\007 \001(\010\022\021\n\tdata_type\030\010 \001(\t\022" +
-      "\027\n\017char_max_length\030\t \001(\005\022\031\n\021char_octet_l" +
-      "ength\030\n \001(\005\022\031\n\021numeric_precision\030\013 \001(\005\022\037" +
-      "\n\027numeric_precision_radix\030\014 \001(\005\022\025\n\rnumer" +
-      "ic_scale\030\r \001(\005\022\033\n\023date_time_precision\030\016 " +
-      "\001(\005\022\025\n\rinterval_type\030\017 \001(\t\022\032\n\022interval_p" +
-      "recision\030\020 \001(\005\022\023\n\013column_size\030\021 \001(\005\"\220\001\n\016" +
-      "GetColumnsResp\022(\n\006status\030\001 \001(\0162\030.exec.us",
-      "er.RequestStatus\022*\n\007columns\030\002 \003(\0132\031.exec" +
-      ".user.ColumnMetadata\022(\n\005error\030\003 \001(\0132\031.ex" +
-      "ec.shared.DrillPBError\"/\n\032CreatePrepared" +
-      "StatementReq\022\021\n\tsql_query\030\001 \001(\t\"\326\003\n\024Resu" +
-      "ltColumnMetadata\022\024\n\014catalog_name\030\001 \001(\t\022\023" +
-      "\n\013schema_name\030\002 \001(\t\022\022\n\ntable_name\030\003 \001(\t\022" +
-      "\023\n\013column_name\030\004 \001(\t\022\r\n\005label\030\005 \001(\t\022\021\n\td" +
-      "ata_type\030\006 \001(\t\022\023\n\013is_nullable\030\007 \001(\010\022\021\n\tp" +
-      "recision\030\010 \001(\005\022\r\n\005scale\030\t \001(\005\022\016\n\006signed\030" +
-      "\n \001(\010\022\024\n\014display_size\030\013 \001(\005\022\022\n\nis_aliase",
-      "d\030\014 \001(\010\0225\n\rsearchability\030\r \001(\0162\036.exec.us" +
-      "er.ColumnSearchability\0223\n\014updatability\030\016" +
-      " \001(\0162\035.exec.user.ColumnUpdatability\022\026\n\016a" +
-      "uto_increment\030\017 \001(\010\022\030\n\020case_sensitivity\030" +
-      "\020 \001(\010\022\020\n\010sortable\030\021 \001(\010\022\022\n\nclass_name\030\022 " +
-      "\001(\t\022\023\n\013is_currency\030\024 \001(\010\".\n\027PreparedStat" +
-      "ementHandle\022\023\n\013server_info\030\001 \001(\014\"\200\001\n\021Pre" +
-      "paredStatement\0220\n\007columns\030\001 \003(\0132\037.exec.u" +
-      "ser.ResultColumnMetadata\0229\n\rserver_handl" +
-      "e\030\002 \001(\0132\".exec.user.PreparedStatementHan",
-      "dle\"\253\001\n\033CreatePreparedStatementResp\022(\n\006s" +
-      "tatus\030\001 \001(\0162\030.exec.user.RequestStatus\0228\n" +
-      "\022prepared_statement\030\002 \001(\0132\034.exec.user.Pr" +
-      "eparedStatement\022(\n\005error\030\003 \001(\0132\031.exec.sh" +
-      "ared.DrillPBError\"\353\001\n\010RunQuery\0221\n\014result" +
-      "s_mode\030\001 \001(\0162\033.exec.user.QueryResultsMod" +
-      "e\022$\n\004type\030\002 \001(\0162\026.exec.shared.QueryType\022" +
-      "\014\n\004plan\030\003 \001(\t\0221\n\tfragments\030\004 \003(\0132\036.exec." +
-      "bit.control.PlanFragment\022E\n\031prepared_sta" +
-      "tement_handle\030\005 \001(\0132\".exec.user.Prepared",
-      "StatementHandle*\332\003\n\007RpcType\022\r\n\tHANDSHAKE" +
-      "\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002\022\r\n\tRUN_QUERY\020\003\022" +
-      "\020\n\014CANCEL_QUERY\020\004\022\023\n\017REQUEST_RESULTS\020\005\022\027" +
-      "\n\023RESUME_PAUSED_QUERY\020\013\022\034\n\030GET_QUERY_PLA" +
-      "N_FRAGMENTS\020\014\022\020\n\014GET_CATALOGS\020\016\022\017\n\013GET_S" +
-      "CHEMAS\020\017\022\016\n\nGET_TABLES\020\020\022\017\n\013GET_COLUMNS\020" +
-      "\021\022\035\n\031CREATE_PREPARED_STATEMENT\020\026\022\016\n\nQUER" +
-      "Y_DATA\020\006\022\020\n\014QUERY_HANDLE\020\007\022\030\n\024QUERY_PLAN" +
-      "_FRAGMENTS\020\r\022\014\n\010CATALOGS\020\022\022\013\n\007SCHEMAS\020\023\022" +
-      "\n\n\006TABLES\020\024\022\013\n\007COLUMNS\020\025\022\026\n\022PREPARED_STA",
-      "TEMENT\020\027\022\026\n\022REQ_META_FUNCTIONS\020\010\022\026\n\022RESP" +
-      "_FUNCTION_LIST\020\t\022\020\n\014QUERY_RESULT\020\n\022\020\n\014SA" +
-      "SL_MESSAGE\020\030*6\n\013SaslSupport\022\030\n\024UNKNOWN_S" +
-      "ASL_SUPPORT\020\000\022\r\n\tSASL_AUTH\020\001*#\n\020QueryRes" +
-      "ultsMode\022\017\n\013STREAM_FULL\020\001*q\n\017HandshakeSt" +
-      "atus\022\013\n\007SUCCESS\020\001\022\030\n\024RPC_VERSION_MISMATC" +
-      "H\020\002\022\017\n\013AUTH_FAILED\020\003\022\023\n\017UNKNOWN_FAILURE\020" +
-      "\004\022\021\n\rAUTH_REQUIRED\020\005*D\n\rRequestStatus\022\022\n" +
-      "\016UNKNOWN_STATUS\020\000\022\006\n\002OK\020\001\022\n\n\006FAILED\020\002\022\013\n" +
-      "\007TIMEOUT\020\003*Y\n\023ColumnSearchability\022\031\n\025UNK",
-      "NOWN_SEARCHABILITY\020\000\022\010\n\004NONE\020\001\022\010\n\004CHAR\020\002" +
-      "\022\n\n\006NUMBER\020\003\022\007\n\003ALL\020\004*K\n\022ColumnUpdatabil" +
-      "ity\022\030\n\024UNKNOWN_UPDATABILITY\020\000\022\r\n\tREAD_ON" +
-      "LY\020\001\022\014\n\010WRITABLE\020\002B+\n\033org.apache.drill.e" +
-      "xec.protoB\nUserProtosH\001"
+      "r\030\002 \001(\0132\025.exec.user.LikeFilter\"i\n\016Schema" +
+      "Metadata\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema" +
+      "_name\030\002 \001(\t\022\r\n\005owner\030\003 \001(\t\022\014\n\004type\030\004 \001(\t",
+      "\022\017\n\007mutable\030\005 \001(\t\"\220\001\n\016GetSchemasResp\022(\n\006" +
+      "status\030\001 \001(\0162\030.exec.user.RequestStatus\022*" +
+      "\n\007schemas\030\002 \003(\0132\031.exec.user.SchemaMetada" +
+      "ta\022(\n\005error\030\003 \001(\0132\031.exec.shared.DrillPBE" +
+      "rror\"\302\001\n\014GetTablesReq\0222\n\023catalog_name_fi" +
+      "lter\030\001 \001(\0132\025.exec.user.LikeFilter\0221\n\022sch" +
+      "ema_name_filter\030\002 \001(\0132\025.exec.user.LikeFi" +
+      "lter\0220\n\021table_name_filter\030\003 \001(\0132\025.exec.u" +
+      "ser.LikeFilter\022\031\n\021table_type_filter\030\004 \003(" +
+      "\t\"\\\n\rTableMetadata\022\024\n\014catalog_name\030\001 \001(\t",
+      "\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntable_name\030\003 \001(" +
+      "\t\022\014\n\004type\030\004 \001(\t\"\215\001\n\rGetTablesResp\022(\n\006sta" +
+      "tus\030\001 \001(\0162\030.exec.user.RequestStatus\022(\n\006t" +
+      "ables\030\002 \003(\0132\030.exec.user.TableMetadata\022(\n" +
+      "\005error\030\003 \001(\0132\031.exec.shared.DrillPBError\"" +
+      "\333\001\n\rGetColumnsReq\0222\n\023catalog_name_filter" +
+      "\030\001 \001(\0132\025.exec.user.LikeFilter\0221\n\022schema_" +
+      "name_filter\030\002 \001(\0132\025.exec.user.LikeFilter" +
+      "\0220\n\021table_name_filter\030\003 \001(\0132\025.exec.user." +
+      "LikeFilter\0221\n\022column_name_filter\030\004 \001(\0132\025",
+      ".exec.user.LikeFilter\"\251\003\n\016ColumnMetadata" +
+      "\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema_name\030\002 " +
+      "\001(\t\022\022\n\ntable_name\030\003 \001(\t\022\023\n\013column_name\030\004" +
+      " \001(\t\022\030\n\020ordinal_position\030\005 \001(\005\022\025\n\rdefaul" +
+      "t_value\030\006 \001(\t\022\023\n\013is_nullable\030\007 \001(\010\022\021\n\tda" +
+      "ta_type\030\010 \001(\t\022\027\n\017char_max_length\030\t \001(\005\022\031" +
+      "\n\021char_octet_length\030\n \001(\005\022\031\n\021numeric_pre" +
+      "cision\030\013 \001(\005\022\037\n\027numeric_precision_radix\030" +
+      "\014 \001(\005\022\025\n\rnumeric_scale\030\r \001(\005\022\033\n\023date_tim" +
+      "e_precision\030\016 \001(\005\022\025\n\rinterval_type\030\017 \001(\t",
+      "\022\032\n\022interval_precision\030\020 \001(\005\022\023\n\013column_s" +
+      "ize\030\021 \001(\005\"\220\001\n\016GetColumnsResp\022(\n\006status\030\001" +
+      " \001(\0162\030.exec.user.RequestStatus\022*\n\007column" +
+      "s\030\002 \003(\0132\031.exec.user.ColumnMetadata\022(\n\005er" +
+      "ror\030\003 \001(\0132\031.exec.shared.DrillPBError\"/\n\032" +
+      "CreatePreparedStatementReq\022\021\n\tsql_query\030" +
+      "\001 \001(\t\"\326\003\n\024ResultColumnMetadata\022\024\n\014catalo" +
+      "g_name\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntab" +
+      "le_name\030\003 \001(\t\022\023\n\013column_name\030\004 \001(\t\022\r\n\005la" +
+      "bel\030\005 \001(\t\022\021\n\tdata_type\030\006 \001(\t\022\023\n\013is_nulla",
+      "ble\030\007 \001(\010\022\021\n\tprecision\030\010 \001(\005\022\r\n\005scale\030\t " +
+      "\001(\005\022\016\n\006signed\030\n \001(\010\022\024\n\014display_size\030\013 \001(" +
+      "\005\022\022\n\nis_aliased\030\014 \001(\010\0225\n\rsearchability\030\r" +
+      " \001(\0162\036.exec.user.ColumnSearchability\0223\n\014" +
+      "updatability\030\016 \001(\0162\035.exec.user.ColumnUpd" +
+      "atability\022\026\n\016auto_increment\030\017 \001(\010\022\030\n\020cas" +
+      "e_sensitivity\030\020 \001(\010\022\020\n\010sortable\030\021 \001(\010\022\022\n" +
+      "\nclass_name\030\022 \001(\t\022\023\n\013is_currency\030\024 \001(\010\"." +
+      "\n\027PreparedStatementHandle\022\023\n\013server_info" +
+      "\030\001 \001(\014\"\200\001\n\021PreparedStatement\0220\n\007columns\030",
+      "\001 \003(\0132\037.exec.user.ResultColumnMetadata\0229" +
+      "\n\rserver_handle\030\002 \001(\0132\".exec.user.Prepar" +
+      "edStatementHandle\"\253\001\n\033CreatePreparedStat" +
+      "ementResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Re" +
+      "questStatus\0228\n\022prepared_statement\030\002 \001(\0132" +
+      "\034.exec.user.PreparedStatement\022(\n\005error\030\003" +
+      " \001(\0132\031.exec.shared.DrillPBError\"\353\001\n\010RunQ" +
+      "uery\0221\n\014results_mode\030\001 \001(\0162\033.exec.user.Q" +
+      "ueryResultsMode\022$\n\004type\030\002 \001(\0162\026.exec.sha" +
+      "red.QueryType\022\014\n\004plan\030\003 \001(\t\0221\n\tfragments",
+      "\030\004 \003(\0132\036.exec.bit.control.PlanFragment\022E" +
+      "\n\031prepared_statement_handle\030\005 \001(\0132\".exec" +
+      ".user.PreparedStatementHandle*\332\003\n\007RpcTyp" +
+      "e\022\r\n\tHANDSHAKE\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002\022\r" +
+      "\n\tRUN_QUERY\020\003\022\020\n\014CANCEL_QUERY\020\004\022\023\n\017REQUE" +
+      "ST_RESULTS\020\005\022\027\n\023RESUME_PAUSED_QUERY\020\013\022\034\n" +
+      "\030GET_QUERY_PLAN_FRAGMENTS\020\014\022\020\n\014GET_CATAL" +
+      "OGS\020\016\022\017\n\013GET_SCHEMAS\020\017\022\016\n\nGET_TABLES\020\020\022\017" +
+      "\n\013GET_COLUMNS\020\021\022\035\n\031CREATE_PREPARED_STATE" +
+      "MENT\020\026\022\016\n\nQUERY_DATA\020\006\022\020\n\014QUERY_HANDLE\020\007",
+      "\022\030\n\024QUERY_PLAN_FRAGMENTS\020\r\022\014\n\010CATALOGS\020\022" +
+      "\022\013\n\007SCHEMAS\020\023\022\n\n\006TABLES\020\024\022\013\n\007COLUMNS\020\025\022\026" +
+      "\n\022PREPARED_STATEMENT\020\027\022\026\n\022REQ_META_FUNCT" +
+      "IONS\020\010\022\026\n\022RESP_FUNCTION_LIST\020\t\022\020\n\014QUERY_" +
+      "RESULT\020\n\022\020\n\014SASL_MESSAGE\020\030*6\n\013SaslSuppor" +
+      "t\022\030\n\024UNKNOWN_SASL_SUPPORT\020\000\022\r\n\tSASL_AUTH" +
+      "\020\001*#\n\020QueryResultsMode\022\017\n\013STREAM_FULL\020\001*" +
+      "q\n\017HandshakeStatus\022\013\n\007SUCCESS\020\001\022\030\n\024RPC_V" +
+      "ERSION_MISMATCH\020\002\022\017\n\013AUTH_FAILED\020\003\022\023\n\017UN" +
+      "KNOWN_FAILURE\020\004\022\021\n\rAUTH_REQUIRED\020\005*D\n\rRe",
+      "questStatus\022\022\n\016UNKNOWN_STATUS\020\000\022\006\n\002OK\020\001\022" +
+      "\n\n\006FAILED\020\002\022\013\n\007TIMEOUT\020\003*Y\n\023ColumnSearch" +
+      "ability\022\031\n\025UNKNOWN_SEARCHABILITY\020\000\022\010\n\004NO" +
+      "NE\020\001\022\010\n\004CHAR\020\002\022\n\n\006NUMBER\020\003\022\007\n\003ALL\020\004*K\n\022C" +
+      "olumnUpdatability\022\030\n\024UNKNOWN_UPDATABILIT" +
+      "Y\020\000\022\r\n\tREAD_ONLY\020\001\022\014\n\010WRITABLE\020\002B+\n\033org." +
+      "apache.drill.exec.protoB\nUserProtosH\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -29731,7 +30224,7 @@ public final class UserProtos {
           internal_static_exec_user_RpcEndpointInfos_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_exec_user_RpcEndpointInfos_descriptor,
-              new java.lang.String[] { "Name", "Version", "MajorVersion", "MinorVersion", "PatchVersion", "Application", });
+              new java.lang.String[] { "Name", "Version", "MajorVersion", "MinorVersion", "PatchVersion", "Application", "BuildNumber", "VersionQualifier", });
           internal_static_exec_user_UserToBitHandshake_descriptor =
             getDescriptor().getMessageTypes().get(3);
           internal_static_exec_user_UserToBitHandshake_fieldAccessorTable = new
@@ -29761,7 +30254,7 @@ public final class UserProtos {
           internal_static_exec_user_BitToUserHandshake_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_exec_user_BitToUserHandshake_descriptor,
-              new java.lang.String[] { "RpcVersion", "Status", "ErrorId", "ErrorMessage", "ServerInfos", "AuthenticationMechanisms", });
+              new java.lang.String[] { "RpcVersion", "Status", "ErrorId", "ErrorMessage", "ServerInfos", "AuthenticationMechanisms", "SupportedMethods", });
           internal_static_exec_user_LikeFilter_descriptor =
             getDescriptor().getMessageTypes().get(8);
           internal_static_exec_user_LikeFilter_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/protocol/src/main/java/org/apache/drill/exec/proto/beans/BitToUserHandshake.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/BitToUserHandshake.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/BitToUserHandshake.java
index 0025049..0cad1c0 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/beans/BitToUserHandshake.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/BitToUserHandshake.java
@@ -55,6 +55,7 @@ public final class BitToUserHandshake implements Externalizable, Message<BitToUs
     private String errorMessage;
     private RpcEndpointInfos serverInfos;
     private List<String> authenticationMechanisms;
+    private List<RpcType> supportedMethods;
 
     public BitToUserHandshake()
     {
@@ -141,6 +142,19 @@ public final class BitToUserHandshake implements Externalizable, Message<BitToUs
         return this;
     }
 
+    // supportedMethods
+
+    public List<RpcType> getSupportedMethodsList()
+    {
+        return supportedMethods;
+    }
+
+    public BitToUserHandshake setSupportedMethodsList(List<RpcType> supportedMethods)
+    {
+        this.supportedMethods = supportedMethods;
+        return this;
+    }
+
     // java serialization
 
     public void readExternal(ObjectInput in) throws IOException
@@ -216,6 +230,11 @@ public final class BitToUserHandshake implements Externalizable, Message<BitToUs
                         message.authenticationMechanisms = new ArrayList<String>();
                     message.authenticationMechanisms.add(input.readString());
                     break;
+                case 8:
+                    if(message.supportedMethods == null)
+                        message.supportedMethods = new ArrayList<RpcType>();
+                    message.supportedMethods.add(RpcType.valueOf(input.readEnum()));
+                    break;
                 default:
                     input.handleUnknownField(number, this);
             }   
@@ -249,6 +268,15 @@ public final class BitToUserHandshake implements Externalizable, Message<BitToUs
                     output.writeString(7, authenticationMechanisms, true);
             }
         }
+
+        if(message.supportedMethods != null)
+        {
+            for(RpcType supportedMethods : message.supportedMethods)
+            {
+                if(supportedMethods != null)
+                    output.writeEnum(8, supportedMethods.number, true);
+            }
+        }
     }
 
     public String getFieldName(int number)
@@ -261,6 +289,7 @@ public final class BitToUserHandshake implements Externalizable, Message<BitToUs
             case 5: return "errorMessage";
             case 6: return "serverInfos";
             case 7: return "authenticationMechanisms";
+            case 8: return "supportedMethods";
             default: return null;
         }
     }
@@ -280,6 +309,7 @@ public final class BitToUserHandshake implements Externalizable, Message<BitToUs
         __fieldMap.put("errorMessage", 5);
         __fieldMap.put("serverInfos", 6);
         __fieldMap.put("authenticationMechanisms", 7);
+        __fieldMap.put("supportedMethods", 8);
     }
     
 }
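
With the repeated supportedMethods field in place, a client can check during the handshake whether the server advertises a particular RPC before issuing it. A minimal sketch against the protostuff bean above (the HandshakeMethods helper is illustrative, not part of this commit):

import java.util.List;

import org.apache.drill.exec.proto.beans.BitToUserHandshake;
import org.apache.drill.exec.proto.beans.RpcType;

public final class HandshakeMethods {
  // Returns true only if the server explicitly advertised the given RPC.
  // getSupportedMethodsList() is null when the field was never set, e.g.
  // when handshaking with an older server, so null is treated as "unknown".
  public static boolean supports(BitToUserHandshake handshake, RpcType method) {
    List<RpcType> methods = handshake.getSupportedMethodsList();
    return methods != null && methods.contains(method);
  }
}

For example, supports(handshake, RpcType.GET_COLUMNS) can guard a metadata call behind the server's advertisement while staying safe against servers that never set the field.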

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcEndpointInfos.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcEndpointInfos.java b/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcEndpointInfos.java
index 9849e56..8a8ffa7 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcEndpointInfos.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/beans/RpcEndpointInfos.java
@@ -53,6 +53,8 @@ public final class RpcEndpointInfos implements Externalizable, Message<RpcEndpoi
     private int minorVersion;
     private int patchVersion;
     private String application;
+    private int buildNumber;
+    private String versionQualifier;
 
     public RpcEndpointInfos()
     {
@@ -139,6 +141,32 @@ public final class RpcEndpointInfos implements Externalizable, Message<RpcEndpoi
         return this;
     }
 
+    // buildNumber
+
+    public int getBuildNumber()
+    {
+        return buildNumber;
+    }
+
+    public RpcEndpointInfos setBuildNumber(int buildNumber)
+    {
+        this.buildNumber = buildNumber;
+        return this;
+    }
+
+    // versionQualifier
+
+    public String getVersionQualifier()
+    {
+        return versionQualifier;
+    }
+
+    public RpcEndpointInfos setVersionQualifier(String versionQualifier)
+    {
+        this.versionQualifier = versionQualifier;
+        return this;
+    }
+
     // java serialization
 
     public void readExternal(ObjectInput in) throws IOException
@@ -211,6 +239,12 @@ public final class RpcEndpointInfos implements Externalizable, Message<RpcEndpoi
                 case 6:
                     message.application = input.readString();
                     break;
+                case 7:
+                    message.buildNumber = input.readUInt32();
+                    break;
+                case 8:
+                    message.versionQualifier = input.readString();
+                    break;
                 default:
                     input.handleUnknownField(number, this);
             }   
@@ -237,6 +271,12 @@ public final class RpcEndpointInfos implements Externalizable, Message<RpcEndpoi
 
         if(message.application != null)
             output.writeString(6, message.application, false);
+
+        if(message.buildNumber != 0)
+            output.writeUInt32(7, message.buildNumber, false);
+
+        if(message.versionQualifier != null)
+            output.writeString(8, message.versionQualifier, false);
     }
 
     public String getFieldName(int number)
@@ -249,6 +289,8 @@ public final class RpcEndpointInfos implements Externalizable, Message<RpcEndpoi
             case 4: return "minorVersion";
             case 5: return "patchVersion";
             case 6: return "application";
+            case 7: return "buildNumber";
+            case 8: return "versionQualifier";
             default: return null;
         }
     }
@@ -268,6 +310,8 @@ public final class RpcEndpointInfos implements Externalizable, Message<RpcEndpoi
         __fieldMap.put("minorVersion", 4);
         __fieldMap.put("patchVersion", 5);
         __fieldMap.put("application", 6);
+        __fieldMap.put("buildNumber", 7);
+        __fieldMap.put("versionQualifier", 8);
     }
     
 }
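
The two new fields give an endpoint a complete build identity. A small sketch of formatting it for logs (the HandshakeUtil name and the output format are illustrative assumptions; the bean leaves buildNumber at 0 and versionQualifier at null when the peer does not send them):

import org.apache.drill.exec.proto.beans.RpcEndpointInfos;

public final class HandshakeUtil {
  // Formats e.g. "Apache Drill Server 1.9.0-SNAPSHOT (build 32)".
  // Both new fields are optional, so unset values are skipped.
  public static String describe(RpcEndpointInfos infos) {
    StringBuilder sb = new StringBuilder();
    sb.append(infos.getName()).append(' ').append(infos.getVersion());
    if (infos.getVersionQualifier() != null) {
      sb.append('-').append(infos.getVersionQualifier());
    }
    if (infos.getBuildNumber() != 0) {
      sb.append(" (build ").append(infos.getBuildNumber()).append(')');
    }
    return sb.toString();
  }
}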

http://git-wip-us.apache.org/repos/asf/drill/blob/16aa0810/protocol/src/main/protobuf/User.proto
----------------------------------------------------------------------
diff --git a/protocol/src/main/protobuf/User.proto b/protocol/src/main/protobuf/User.proto
index 971248e..ef993cc 100644
--- a/protocol/src/main/protobuf/User.proto
+++ b/protocol/src/main/protobuf/User.proto
@@ -57,12 +57,14 @@ message UserProperties {
 }
 
 message RpcEndpointInfos {
-    optional string name = 1;           // example: Apache Drill Server, Apache Drill C++ client
-    optional string version = 2;        // example: 1.9.0
-    optional uint32 majorVersion = 3;   // example: 1
-    optional uint32 minorVersion = 4;   // example: 9
-    optional uint32 patchVersion = 5;   // example: 0
-    optional string application = 6;    // example: Tableau 9.3
+    optional string name = 1;             // example: Apache Drill Server, Apache Drill C++ client
+    optional string version = 2;          // example: 1.9.0
+    optional uint32 majorVersion = 3;     // example: 1
+    optional uint32 minorVersion = 4;     // example: 9
+    optional uint32 patchVersion = 5;     // example: 0
+    optional string application = 6;      // example: Tableau 9.3
+    optional uint32 buildNumber = 7;      // example: 32
+    optional string versionQualifier = 8; // example: SNAPSHOT
 }
 
 enum SaslSupport {
@@ -121,6 +123,7 @@ message BitToUserHandshake {
   optional string errorMessage = 5;
   optional RpcEndpointInfos server_infos = 6;
   repeated string authenticationMechanisms = 7;
+  repeated RpcType supported_methods = 8;
 }
 
 /*


[11/27] drill git commit: DRILL-5301: Server metadata API

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/protobuf/User.proto
----------------------------------------------------------------------
diff --git a/protocol/src/main/protobuf/User.proto b/protocol/src/main/protobuf/User.proto
index ef993cc..4b722b3 100644
--- a/protocol/src/main/protobuf/User.proto
+++ b/protocol/src/main/protobuf/User.proto
@@ -5,6 +5,7 @@ option java_outer_classname = "UserProtos";
 option optimize_for = SPEED;
 
 import "SchemaDef.proto";
+import "Types.proto";
 import "UserBitShared.proto";
 import "BitData.proto";
 import "BitControl.proto";
@@ -27,6 +28,7 @@ enum RpcType {
   GET_TABLES = 16; // user is requesting metadata of table(s)
   GET_COLUMNS = 17; // user is requesting metadata of column(s)
   CREATE_PREPARED_STATEMENT = 22; // user is sending a request to create prepared statement
+  GET_SERVER_META = 8; // user is sending a request to receive server metadata
 
   // bit to user
   QUERY_DATA = 6; // drillbit is sending a query result data batch to the user
@@ -37,9 +39,7 @@ enum RpcType {
   TABLES = 20; // return table metadata in response to GET_TABLES
   COLUMNS = 21; // return column metadata in response to GET_COLUMNS
   PREPARED_STATEMENT = 23; // return preparated statement in response to CREATE_PREPARED_STATEMENT
-
-  REQ_META_FUNCTIONS = 8;
-  RESP_FUNCTION_LIST = 9;
+  SERVER_META = 9; // return server infos in response to GET_SERVER_META
 
   QUERY_RESULT = 10; // drillbit is reporting a query status change, most likely a terminal message, to the user
 
@@ -439,6 +439,219 @@ message CreatePreparedStatementResp {
 }
 
 /*
+ * Request message for getting server metadata
+ */
+message GetServerMetaReq {
+}
+
+enum CollateSupport {
+    CS_UNKNOWN      = 0;   // Unknown support (for forward compatibility)
+    CS_GROUP_BY     = 1;   // COLLATE clause can be added after each grouping column
+}
+message ConvertSupport {
+  required common.MinorType from = 1;
+  required common.MinorType to = 2;
+}
+
+enum CorrelationNamesSupport {
+    CN_NONE = 1;               // Correlation names are not supported
+    CN_DIFFERENT_NAMES = 2;    // Correlation names are supported, but names have to
+                               // be different from the tables they represent
+    CN_ANY = 3;                // Correlation names are supported without restriction
+}
+
+enum DateTimeLiteralsSupport {
+    DL_UNKNOWN = 0;                    // Unknown support (for forward compatibility)
+    DL_DATE = 1;                       // DATE literal is supported
+    DL_TIME = 2;                       // TIME literal is supported
+    DL_TIMESTAMP = 3;                  // TIMESTAMP literal is supported
+    DL_INTERVAL_YEAR = 4;              // INTERVAL YEAR literal is supported
+    DL_INTERVAL_MONTH = 5;             // INTERVAL MONTH literal is supported
+    DL_INTERVAL_DAY = 6;               // INTERVAL DAY literal is supported
+    DL_INTERVAL_HOUR = 7;              // INTERVAL HOUR literal is supported
+    DL_INTERVAL_MINUTE = 8;            // INTERVAL MINUTE literal is supported
+    DL_INTERVAL_SECOND = 9;            // INTERVAL SECOND literal is supported
+    DL_INTERVAL_YEAR_TO_MONTH = 10;    // INTERVAL YEAR TO MONTH literal is supported
+    DL_INTERVAL_DAY_TO_HOUR = 11;      // INTERVAL DAY TO HOUR literal is supported
+    DL_INTERVAL_DAY_TO_MINUTE = 12;    // INTERVAL DAY TO MINUTE literal is supported
+    DL_INTERVAL_DAY_TO_SECOND = 13;    // INTERVAL DAY TO SECOND literal is supported
+    DL_INTERVAL_HOUR_TO_MINUTE = 14;   // INTERVAL HOUR TO MINUTE literal is supported
+    DL_INTERVAL_HOUR_TO_SECOND = 15;   // INTERVAL HOUR TO SECOND literal is supported
+    DL_INTERVAL_MINUTE_TO_SECOND = 16; // INTERVAL MINUTE TO SECOND literal is supported
+}
+
+enum GroupBySupport {
+    GB_NONE = 1;           // Group by is not supported
+    GB_SELECT_ONLY = 2;    // Group by supported with non-aggregated columns in select
+    GB_BEYOND_SELECT = 3;  /* Group by supported with columns absent from the select list
+                              if all the non-aggregated columns from the select list are also added */
+    GB_UNRELATED = 4;      // Group by supported with columns absent from the select list
+}
+
+enum IdentifierCasing {
+    IC_UNKNOWN = 0;        // Unknown support (for forward compatibility)
+    IC_STORES_LOWER = 1;   /* Mixed case identifier is treated as case insensitive
+                              and stored in lower case */
+    IC_STORES_MIXED = 2;   /* Mixed case identifier is treated as case insensitive
+                              and stored in mixed case */
+    IC_STORES_UPPER = 3;   /* Mixed case identifier is treated as case insensitive
+                              and stored in upper case */
+    IC_SUPPORTS_MIXED = 4; /* Mixed case identifier is treated as case sensitive
+                              and stored in mixed case */
+}
+
+enum NullCollation {
+    NC_UNKNOWN   = 0;  // Unknown support (for forward compatibility)
+    NC_AT_START  = 1;  // NULL values are sorted at the start regardless of the order
+    NC_AT_END = 2;     // NULL values are sorted at the end regardless of the order
+    NC_HIGH = 3;       // NULL is the highest value
+    NC_LOW = 4;        // NULL is the lowest value
+}
+
+enum OrderBySupport {
+    OB_UNKNOWN = 0;     // Unknown support (for forward compatibility)
+    OB_UNRELATED = 1;   // ORDER BY supported with columns not in SELECT list
+    OB_EXPRESSION = 2;  // ORDER BY with expressions is supported
+}
+
+enum OuterJoinSupport {
+    OJ_UNKNOWN = 0;            // Unknown support (for forward compatibility)
+    OJ_LEFT = 1;               // Left outer join is supported
+    OJ_RIGHT = 2;              // Right outer join is supported
+    OJ_FULL = 3;               // Full outer join is supported
+    OJ_NESTED = 4;             // Nested outer join is supported
+    OJ_NOT_ORDERED = 5;        /* Column names in the ON clause don't have to share the same order
+                                  as their respective table names in the OUTER JOIN clause */
+    OJ_INNER = 6;              // Inner table can also be used in an inner join
+    OJ_ALL_COMPARISON_OPS = 7; // Any comparison operator is supported in the ON clause
+}
+
+enum SubQuerySupport {
+    SQ_UNKNOWN    = 0;     // Unknown support (for forward compatibility)
+    SQ_CORRELATED = 1;     // Correlated subquery is supported
+    SQ_IN_COMPARISON = 2;  // Subquery in comparison expression is supported
+    SQ_IN_EXISTS = 3;      // Subquery in EXISTS expression is supported
+    SQ_IN_INSERT = 4;      // Subquery in INSERT expression is supported
+    SQ_IN_QUANTIFIED = 5;  // Subquery in quantified expression is supported
+}
+
+enum UnionSupport {
+    U_UNKNOWN = 0;    // Unknown support (for forward compatibility)
+    U_UNION = 1;      // UNION is supported
+    U_UNION_ALL = 2;  // UNION_ALL is supported
+}
+
+/*
+ * Response message for GetServerMetaReq
+ */
+message GetServerMetaResp {
+  optional RequestStatus status = 1;
+  optional ServerMeta server_meta = 2;
+  optional exec.shared.DrillPBError error = 3;
+}
+
+message ServerMeta {
+    // True if current user can use all tables returned by GetTables
+    optional bool                       all_tables_selectable = 1;
+    // True if BLOBs are included in the max row size
+    optional bool                       blob_included_in_max_row_size = 2;
+    // True if catalog name is at the start of a fully qualified table name
+    optional bool                       catalog_at_start = 3;
+    // The catalog separator
+    optional string                     catalog_separator = 4;
+    // The term used to designate catalogs
+    optional string                     catalog_term = 5;
+    // COLLATE support
+    repeated CollateSupport             collate_support = 6;
+    // True if column aliasing is supported
+    optional bool                       column_aliasing_supported = 7;
+    // CONVERT support
+    repeated ConvertSupport             convert_support = 8;
+    // Correlation names support
+    optional CorrelationNamesSupport    correlation_names_support = 9;
+    // Supported ODBC/JDBC Date Time scalar functions
+    repeated string                     date_time_functions = 10;
+    // Supported Date Time literals
+    repeated DateTimeLiteralsSupport    date_time_literals_support = 11;
+    // Group By support
+    optional GroupBySupport             group_by_support = 12;
+    // Unquoted Identifier casing
+    optional IdentifierCasing           identifier_casing = 13;
+    // Quote string for identifiers
+    optional string                     identifier_quote_string = 14;
+    // True if LIKE supports an ESCAPE clause
+    optional bool                       like_escape_clause_supported = 15;
+    // Maximum number of hexadecimal characters for binary literals (0 if unlimited or unknown)
+    optional uint32                     max_binary_literal_length = 16;
+    // Maximum length of catalog names (0 if unlimited or unknown)
+    optional uint32                     max_catalog_name_length = 17;
+    // Maximum number of characters for string literals (0 if unlimited or unknown)
+    optional uint32                     max_char_literal_length = 18;
+    // Maximum length of column names (0 if unlimited or unknown)
+    optional uint32                     max_column_name_length = 19;
+    // Maximum number of columns in GROUP BY expressions (0 if unlimited or unknown)
+    optional uint32                     max_columns_in_group_by = 20;
+    // Maximum number of columns in ORDER BY expressions (0 if unlimited or unknown)
+    optional uint32                     max_columns_in_order_by = 21;
+    // Maximum number of columns in SELECT expressions (0 if unlimited or unknown)
+    optional uint32                     max_columns_in_select = 22;
+    // Maximum length of cursor names (0 if unlimited or unknown)
+    optional uint32                     max_cursor_name_length = 23;
+    // Maximum logical size for LOB types (0 if unlimited or unknown)
+    optional uint32                     max_logical_lob_size = 24;
+    // Maximum number of bytes for a single row (0 if unlimited or unknown)
+    optional uint32                     max_row_size = 25;
+    // Maximum length of schema names (0 if unlimited or unknown)
+    optional uint32                     max_schema_name_length = 26;
+    // Maximum length for statements (0 if unlimited or unknown)
+    optional uint32                     max_statement_length = 27;
+    // Maximum number of statements (0 if unlimited or unknown)
+    optional uint32                     max_statements = 28;
+    // Maximum length of table names (0 if unlimited or unknown)
+    optional uint32                     max_table_name_length = 29;
+    // Maximum number of tables in a SELECT expression (0 if unlimited or unknown)
+    optional uint32                     max_tables_in_select = 30;
+    // Maximum length of user names (0 if unlimited or unknown)
+    optional uint32                     max_user_name_length = 31;
+    // How NULL values are sorted
+    optional NullCollation              null_collation = 32;
+    // True if NULL + non NULL is NULL
+    optional bool                       null_plus_non_null_equals_null = 33;
+    // Supported ODBC/JDBC numeric scalar functions
+    repeated string                     numeric_functions = 34;
+    // ORDER BY support
+    repeated OrderBySupport             order_by_support = 35;
+    // Outer join support
+    repeated OuterJoinSupport           outer_join_support = 36;
+    // Quoted identifier casing
+    optional IdentifierCasing           quoted_identifier_casing = 37;
+    // True if connection access is read only
+    optional bool                       read_only = 38;
+    // The term used to designate a schema
+    optional string                     schema_term = 39;
+    // Characters used for escaping (empty if not supported)
+    optional string                     search_escape_string = 40;
+    // True if SELECT FOR UPDATE is supported
+    optional bool                       select_for_update_supported = 41;
+    // List of extra characters that can be used in identifier names
+    optional string                     special_characters = 42;
+    // List of SQL keywords
+    repeated string                     sql_keywords = 43;
+    // Supported ODBC/JDBC string scalar functions
+    repeated string                     string_functions = 44;
+    // Subquery support
+    repeated SubQuerySupport            subquery_support = 45;
+    // Supported ODBC/JDBC system scalar functions
+    repeated string                     system_functions = 46;
+    // The term used to designate a table
+    optional string                     table_term = 47;
+    // True if transactions are supported
+    optional bool                       transaction_supported = 48;
+    // UNION support
+    repeated UnionSupport               union_support = 49;
+}
+
+/*
  * Request message for running a query.
  */
 message RunQuery {
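
Once regenerated, the ServerMeta message above yields the usual protobuf accessors, so a client that issued GET_SERVER_META can branch on the request status and then read individual capabilities. A minimal sketch (the class name and the chosen fields are illustrative; the accessors follow mechanically from the field names above):

import org.apache.drill.exec.proto.UserProtos.GetServerMetaResp;
import org.apache.drill.exec.proto.UserProtos.RequestStatus;
import org.apache.drill.exec.proto.UserProtos.ServerMeta;

public final class ServerMetaPrinter {
  // Reads a few capabilities out of a GET_SERVER_META response.
  public static void print(GetServerMetaResp resp) {
    if (resp.getStatus() != RequestStatus.OK) {
      // DrillPBError (field 3 above) carries the failure details.
      throw new IllegalStateException("GET_SERVER_META failed: " + resp.getError().getMessage());
    }
    ServerMeta meta = resp.getServerMeta();
    // For all max_* fields, 0 means "unlimited or unknown".
    System.out.println("identifier quote string: " + meta.getIdentifierQuoteString());
    System.out.println("max columns in SELECT:   " + meta.getMaxColumnsInSelect());
    System.out.println("catalog term:            " + meta.getCatalogTerm());
  }
}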


[21/27] drill git commit: DRILL-4963: Fix issues with dynamically loaded overloaded functions

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/test/java/org/apache/drill/TestDynamicUDFSupport.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestDynamicUDFSupport.java b/exec/java-exec/src/test/java/org/apache/drill/TestDynamicUDFSupport.java
index 10a03b7..25c01b8 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestDynamicUDFSupport.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestDynamicUDFSupport.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -134,7 +134,8 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
   @Test
   public void testAbsentSourceInStaging() throws Exception {
     Path staging = getDrillbitContext().getRemoteFunctionRegistry().getStagingArea();
-    copyJar(getDrillbitContext().getRemoteFunctionRegistry().getFs(), new Path(jars.toURI()), staging, default_binary_name);
+    copyJar(getDrillbitContext().getRemoteFunctionRegistry().getFs(), new Path(jars.toURI()),
+        staging, default_binary_name);
 
     String summary = String.format("File %s does not exist", new Path(staging, default_source_name).toUri().getPath());
 
@@ -157,7 +158,8 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
         .sqlQuery("create function using jar '%s'", jarWithNoMarkerFile)
         .unOrdered()
         .baselineColumns("ok", "summary")
-        .baselineValues(false, String.format(summary, CommonConstants.DRILL_JAR_MARKER_FILE_RESOURCE_PATHNAME, jarWithNoMarkerFile))
+        .baselineValues(false, String.format(summary,
+            CommonConstants.DRILL_JAR_MARKER_FILE_RESOURCE_PATHNAME, jarWithNoMarkerFile))
         .go();
   }
 
@@ -201,7 +203,7 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     assertTrue("Source should be present in registry area",
         fs.exists(new Path(remoteFunctionRegistry.getRegistryArea(), default_source_name)));
 
-    Registry registry = remoteFunctionRegistry.getRegistry();
+    Registry registry = remoteFunctionRegistry.getRegistry(new DataChangeVersion());
     assertEquals("Registry should contain one jar", registry.getJarList().size(), 1);
     assertEquals(registry.getJar(0).getName(), default_binary_name);
   }
@@ -304,7 +306,7 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     assertTrue("Source should be present in registry area",
             fs.exists(new Path(remoteFunctionRegistry.getRegistryArea(), default_source_name)));
 
-    Registry registry = remoteFunctionRegistry.getRegistry();
+    Registry registry = remoteFunctionRegistry.getRegistry(new DataChangeVersion());
     assertEquals("Registry should contain one jar", registry.getJarList().size(), 1);
     assertEquals(registry.getJar(0).getName(), default_binary_name);
   }
@@ -337,7 +339,8 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     FileSystem fs = remoteFunctionRegistry.getFs();
 
     assertFalse("Registry area should be empty", fs.listFiles(remoteFunctionRegistry.getRegistryArea(), false).hasNext());
-    assertEquals("Registry should be empty", remoteFunctionRegistry.getRegistry().getJarList().size(), 0);
+    assertEquals("Registry should be empty",
+        remoteFunctionRegistry.getRegistry(new DataChangeVersion()).getJarList().size(), 0);
   }
 
   @Test
@@ -367,10 +370,13 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     assertTrue("Source should be present in staging area",
             fs.exists(new Path(remoteFunctionRegistry.getStagingArea(), default_source_name)));
 
-    assertFalse("Registry area should be empty", fs.listFiles(remoteFunctionRegistry.getRegistryArea(), false).hasNext());
-    assertFalse("Temporary area should be empty", fs.listFiles(remoteFunctionRegistry.getTmpArea(), false).hasNext());
+    assertFalse("Registry area should be empty",
+        fs.listFiles(remoteFunctionRegistry.getRegistryArea(), false).hasNext());
+    assertFalse("Temporary area should be empty",
+        fs.listFiles(remoteFunctionRegistry.getTmpArea(), false).hasNext());
 
-    assertEquals("Registry should be empty", remoteFunctionRegistry.getRegistry().getJarList().size(), 0);
+    assertEquals("Registry should be empty",
+        remoteFunctionRegistry.getRegistry(new DataChangeVersion()).getJarList().size(), 0);
   }
 
   @Test
@@ -402,7 +408,7 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     assertTrue("Source should be present in registry area",
             fs.exists(new Path(remoteFunctionRegistry.getRegistryArea(), default_source_name)));
 
-    Registry registry = remoteFunctionRegistry.getRegistry();
+    Registry registry = remoteFunctionRegistry.getRegistry(new DataChangeVersion());
     assertEquals("Registry should contain one jar", registry.getJarList().size(), 1);
     assertEquals(registry.getJar(0).getName(), default_binary_name);
   }
@@ -424,7 +430,8 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
         .baselineValues("a")
         .go();
 
-    Path localUdfDirPath = Deencapsulation.getField(getDrillbitContext().getFunctionImplementationRegistry(), "localUdfDir");
+    Path localUdfDirPath = Deencapsulation.getField(
+        getDrillbitContext().getFunctionImplementationRegistry(), "localUdfDir");
     File localUdfDir = new File(localUdfDirPath.toUri().getPath());
 
     assertTrue("Binary should exist in local udf directory", new File(localUdfDir, default_binary_name).exists());
@@ -455,6 +462,33 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     }
   }
 
+  @Test
+  public void testOverloadedFunctionPlanningStage() throws Exception {
+    String jarName = "DrillUDF-overloading-1.0.jar";
+    copyJarsToStagingArea(jarName, JarUtil.getSourceName(jarName));
+    test("create function using jar '%s'", jarName);
+
+    testBuilder()
+        .sqlQuery("select abs('A', 'A') as res from (values(1))")
+        .unOrdered()
+        .baselineColumns("res")
+        .baselineValues("ABS was overloaded. Input: A, A")
+        .go();
+  }
+
+  @Test
+  public void testOverloadedFunctionExecutionStage() throws Exception {
+    String jarName = "DrillUDF-overloading-1.0.jar";
+    copyJarsToStagingArea(jarName, JarUtil.getSourceName(jarName));
+    test("create function using jar '%s'", jarName);
+
+    testBuilder()
+        .sqlQuery("select log('A') as res from (values(1))")
+        .unOrdered()
+        .baselineColumns("res")
+        .baselineValues("LOG was overloaded. Input: A")
+        .go();
+  }
 
   @Test
   public void testDropFunction() throws Exception {
@@ -462,7 +496,8 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     test("create function using jar '%s'", default_binary_name);
     test("select custom_lower('A') from (values(1))");
 
-    Path localUdfDirPath = Deencapsulation.getField(getDrillbitContext().getFunctionImplementationRegistry(), "localUdfDir");
+    Path localUdfDirPath = Deencapsulation.getField(
+        getDrillbitContext().getFunctionImplementationRegistry(), "localUdfDir");
     File localUdfDir = new File(localUdfDirPath.toUri().getPath());
 
     assertTrue("Binary should exist in local udf directory", new File(localUdfDir, default_binary_name).exists());
@@ -485,7 +520,8 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     }
 
     RemoteFunctionRegistry remoteFunctionRegistry = getDrillbitContext().getRemoteFunctionRegistry();
-    assertEquals("Remote registry should be empty", remoteFunctionRegistry.getRegistry().getJarList().size(), 0);
+    assertEquals("Remote registry should be empty",
+        remoteFunctionRegistry.getRegistry(new DataChangeVersion()).getJarList().size(), 0);
 
     FileSystem fs = remoteFunctionRegistry.getFs();
     assertFalse("Binary should not be present in registry area",
@@ -561,8 +597,10 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
         .baselineValues(false, errorMessage)
         .go();
 
-    assertFalse("Registry area should be empty", fs.listFiles(remoteFunctionRegistry.getRegistryArea(), false).hasNext());
-    assertFalse("Temporary area should be empty", fs.listFiles(remoteFunctionRegistry.getTmpArea(), false).hasNext());
+    assertFalse("Registry area should be empty",
+        fs.listFiles(remoteFunctionRegistry.getRegistryArea(), false).hasNext());
+    assertFalse("Temporary area should be empty",
+        fs.listFiles(remoteFunctionRegistry.getTmpArea(), false).hasNext());
 
     assertTrue("Binary should be present in staging area",
         fs.exists(new Path(remoteFunctionRegistry.getStagingArea(), default_binary_name)));
@@ -684,7 +722,7 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
 
     DataChangeVersion version = new DataChangeVersion();
     Registry registry = remoteFunctionRegistry.getRegistry(version);
-    assertEquals("Remote registry version should match", 2, version.getVersion());
+    assertEquals("Remote registry version should match", 1, version.getVersion());
     List<Jar> jarList = registry.getJarList();
     assertEquals("Only one jar should be registered", 1, jarList.size());
     assertEquals("Jar name should match", jarName1, jarList.get(0).getName());
@@ -748,7 +786,7 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
 
     DataChangeVersion version = new DataChangeVersion();
     Registry registry = remoteFunctionRegistry.getRegistry(version);
-    assertEquals("Remote registry version should match", 3, version.getVersion());
+    assertEquals("Remote registry version should match", 2, version.getVersion());
 
     List<Jar> actualJars = registry.getJarList();
     List<String> expectedJars = Lists.newArrayList(jarName1, jarName2);
@@ -777,7 +815,7 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
       public Boolean answer(InvocationOnMock invocation) throws Throwable {
         latch1.await();
         boolean result = (boolean) invocation.callRealMethod();
-        assertTrue("loadRemoteFunctions() should return true", result);
+        assertTrue("syncWithRemoteRegistry() should return true", result);
         latch2.countDown();
         return true;
       }
@@ -788,11 +826,11 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
             latch1.countDown();
             latch2.await();
             boolean result = (boolean) invocation.callRealMethod();
-            assertTrue("loadRemoteFunctions() should return true", result);
+            assertTrue("syncWithRemoteRegistry() should return true", result);
             return true;
           }
         })
-        .when(functionImplementationRegistry).loadRemoteFunctions(anyLong());
+        .when(functionImplementationRegistry).syncWithRemoteRegistry(anyLong());
 
     SimpleQueryRunner simpleQueryRunner = new SimpleQueryRunner(query);
     Thread thread1 = new Thread(simpleQueryRunner);
@@ -804,9 +842,10 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
     thread1.join();
     thread2.join();
 
-    verify(functionImplementationRegistry, times(2)).loadRemoteFunctions(anyLong());
-    LocalFunctionRegistry localFunctionRegistry = Deencapsulation.getField(functionImplementationRegistry, "localFunctionRegistry");
-    assertEquals("Local functionRegistry version should match", 2L, localFunctionRegistry.getVersion());
+    verify(functionImplementationRegistry, times(2)).syncWithRemoteRegistry(anyLong());
+    LocalFunctionRegistry localFunctionRegistry = Deencapsulation.getField(
+        functionImplementationRegistry, "localFunctionRegistry");
+    assertEquals("Sync function registry version should match", 1L, localFunctionRegistry.getVersion());
   }
 
   @Test
@@ -819,7 +858,7 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
       @Override
       public Boolean answer(InvocationOnMock invocation) throws Throwable {
         boolean result = (boolean) invocation.callRealMethod();
-        assertTrue("loadRemoteFunctions() should return true", result);
+        assertTrue("syncWithRemoteRegistry() should return true", result);
         return true;
       }
     })
@@ -827,11 +866,11 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
           @Override
           public Boolean answer(InvocationOnMock invocation) throws Throwable {
             boolean result = (boolean) invocation.callRealMethod();
-            assertFalse("loadRemoteFunctions() should return false", result);
+            assertFalse("syncWithRemoteRegistry() should return false", result);
             return false;
           }
         })
-        .when(functionImplementationRegistry).loadRemoteFunctions(anyLong());
+        .when(functionImplementationRegistry).syncWithRemoteRegistry(anyLong());
 
     test("select custom_lower('A') from (values(1))");
 
@@ -841,9 +880,10 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
       assertThat(e.getMessage(), containsString("No match found for function signature unknown_lower(<CHARACTER>)"));
     }
 
-    verify(functionImplementationRegistry, times(2)).loadRemoteFunctions(anyLong());
-    LocalFunctionRegistry localFunctionRegistry = Deencapsulation.getField(functionImplementationRegistry, "localFunctionRegistry");
-    assertEquals("Local functionRegistry version should match", 2L, localFunctionRegistry.getVersion());
+    verify(functionImplementationRegistry, times(2)).syncWithRemoteRegistry(anyLong());
+    LocalFunctionRegistry localFunctionRegistry = Deencapsulation.getField(
+        functionImplementationRegistry, "localFunctionRegistry");
+    assertEquals("Sync function registry version should match", 1L, localFunctionRegistry.getVersion());
   }
 
   private void copyDefaultJarsToStagingArea() throws IOException {
@@ -866,7 +906,8 @@ public class TestDynamicUDFSupport extends BaseTestQuery {
   }
 
   private RemoteFunctionRegistry spyRemoteFunctionRegistry() {
-    FunctionImplementationRegistry functionImplementationRegistry = getDrillbitContext().getFunctionImplementationRegistry();
+    FunctionImplementationRegistry functionImplementationRegistry =
+        getDrillbitContext().getFunctionImplementationRegistry();
     RemoteFunctionRegistry remoteFunctionRegistry = functionImplementationRegistry.getRemoteFunctionRegistry();
     RemoteFunctionRegistry spy = spy(remoteFunctionRegistry);
     Deencapsulation.setField(functionImplementationRegistry, "remoteFunctionRegistry", spy);
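
The recurring getRegistry(new DataChangeVersion()) pattern above reflects the new contract: the holder is filled with the version observed at read time, enabling optimistic writes against ZooKeeper. A sketch of the read-modify-write loop this supports (updateRegistry throwing VersionMismatchException on a stale version is an assumption based on the surrounding changes, and package paths may differ):

import org.apache.drill.exec.exception.VersionMismatchException;
import org.apache.drill.exec.expr.fn.registry.RemoteFunctionRegistry;
import org.apache.drill.exec.proto.UserBitShared.Jar;
import org.apache.drill.exec.proto.UserBitShared.Registry;
import org.apache.drill.exec.store.sys.store.DataChangeVersion;

public final class RegistryUpdater {
  // Optimistic read-modify-write: re-read and retry whenever another
  // drillbit changed the registry between our read and our write.
  public static void addJarWithRetry(RemoteFunctionRegistry remote, Jar jar) {
    while (true) {
      DataChangeVersion version = new DataChangeVersion();
      Registry current = remote.getRegistry(version);
      Registry updated = Registry.newBuilder(current).addJar(jar).build();
      try {
        remote.updateRegistry(updated, version); // assumed API
        return;
      } catch (VersionMismatchException e) {
        // stale version: another writer won the race, loop and retry
      }
    }
  }
}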

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/test/java/org/apache/drill/exec/coord/zk/TestZookeeperClient.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/coord/zk/TestZookeeperClient.java b/exec/java-exec/src/test/java/org/apache/drill/exec/coord/zk/TestZookeeperClient.java
index ab886c4..88f1fcb 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/coord/zk/TestZookeeperClient.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/coord/zk/TestZookeeperClient.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,19 +22,13 @@ import java.util.List;
 import java.util.Map;
 
 import com.google.common.collect.Lists;
-import org.apache.curator.CuratorZookeeperClient;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.framework.api.ACLBackgroundPathAndBytesable;
-import org.apache.curator.framework.api.CreateBuilder;
-import org.apache.curator.framework.api.DeleteBuilder;
-import org.apache.curator.framework.api.SetDataBuilder;
 import org.apache.curator.framework.recipes.cache.ChildData;
 import org.apache.curator.framework.recipes.cache.PathChildrenCache;
 import org.apache.curator.retry.RetryNTimes;
 import org.apache.curator.test.TestingServer;
-import org.apache.curator.utils.EnsurePath;
 import org.apache.drill.common.collections.ImmutableEntry;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.exec.exception.VersionMismatchException;
@@ -47,7 +41,9 @@ import org.junit.Test;
 import org.mockito.Mockito;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 public class TestZookeeperClient {
   private final static String root = "/test";
@@ -132,6 +128,26 @@ public class TestZookeeperClient {
   }
 
   @Test
+  public void testHasPathTrueWithVersion() {
+    client.put(path, data);
+    DataChangeVersion version0 = new DataChangeVersion();
+    assertTrue(client.hasPath(path, true, version0));
+    assertEquals("Versions should match", 0, version0.getVersion());
+    client.put(path, data);
+    DataChangeVersion version1 = new DataChangeVersion();
+    assertTrue(client.hasPath(path, true, version1));
+    assertEquals("Versions should match", 1, version1.getVersion());
+  }
+
+  @Test
+  public void testHasPathFalseWithVersion() {
+    DataChangeVersion version0 = new DataChangeVersion();
+    version0.setVersion(-1);
+    assertFalse(client.hasPath("unknown_path", true, version0));
+    assertEquals("Versions should not have changed", -1, version0.getVersion());
+  }
+
+  @Test
   public void testPutAndGetWorks() {
     client.put(path, data);
     final byte[] actual = client.get(path, true);

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolderTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolderTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolderTest.java
index 61fa4e5..cd4dd99 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolderTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/registry/FunctionRegistryHolderTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -60,25 +60,24 @@ public class FunctionRegistryHolderTest {
   @Before
   public void setup() {
     resetRegistry();
-    fillInRegistry();
+    fillInRegistry(1);
   }
 
   @Test
   public void testVersion() {
     resetRegistry();
-    assertEquals("Initial version should be 0", 0, registryHolder.getVersion());
-    registryHolder.addJars(Maps.<String, List<FunctionHolder>>newHashMap());
-    assertEquals("Version should not change if no jars were added.", 0, registryHolder.getVersion());
-    registryHolder.removeJar("unknown.jar");
-    assertEquals("Version should not change if no jars were removed.", 0, registryHolder.getVersion());
-    fillInRegistry();
-    assertEquals("Version should have incremented by 1", 1, registryHolder.getVersion());
+    long expectedVersion = 0;
+    assertEquals("Initial version should be 0", expectedVersion, registryHolder.getVersion());
+    registryHolder.addJars(Maps.<String, List<FunctionHolder>>newHashMap(), ++expectedVersion);
+    assertEquals("Version can change if no jars were added.", expectedVersion, registryHolder.getVersion());
+    fillInRegistry(++expectedVersion);
+    assertEquals("Version should have incremented by 1", expectedVersion, registryHolder.getVersion());
     registryHolder.removeJar(built_in);
-    assertEquals("Version should have incremented by 1", 2, registryHolder.getVersion());
-    fillInRegistry();
-    assertEquals("Version should have incremented by 1", 3, registryHolder.getVersion());
-    fillInRegistry();
-    assertEquals("Version should have incremented by 1", 4, registryHolder.getVersion());
+    assertEquals("Version should have incremented by 1", expectedVersion, registryHolder.getVersion());
+    fillInRegistry(++expectedVersion);
+    assertEquals("Version should have incremented by 1", expectedVersion, registryHolder.getVersion());
+    fillInRegistry(++expectedVersion);
+    assertEquals("Version should have incremented by 1", expectedVersion, registryHolder.getVersion());
   }
 
   @Test
@@ -97,8 +96,9 @@ public class FunctionRegistryHolderTest {
       }
     }
 
-    registryHolder.addJars(newJars);
-    assertEquals("Version number should match", 1, registryHolder.getVersion());
+    long expectedVersion = 0;
+    registryHolder.addJars(newJars, ++expectedVersion);
+    assertEquals("Version number should match", expectedVersion, registryHolder.getVersion());
     compareTwoLists(jars, registryHolder.getAllJarNames());
     assertEquals(functionsSize, registryHolder.functionsSize());
     compareListMultimaps(functionsWithHolders, registryHolder.getAllFunctionsWithHolders());
@@ -120,16 +120,17 @@ public class FunctionRegistryHolderTest {
         functionsSize++;
       }
     }
-    registryHolder.addJars(newJars);
-    assertEquals("Version number should match", 1, registryHolder.getVersion());
+    long expectedVersion = 0;
+    registryHolder.addJars(newJars, ++expectedVersion);
+    assertEquals("Version number should match", expectedVersion, registryHolder.getVersion());
     compareTwoLists(jars, registryHolder.getAllJarNames());
     assertEquals(functionsSize, registryHolder.functionsSize());
     compareListMultimaps(functionsWithHolders, registryHolder.getAllFunctionsWithHolders());
     compareListMultimaps(functionsWithSignatures, registryHolder.getAllFunctionsWithSignatures());
 
     // adding the same jars should not cause adding duplicates, should override existing jars only
-    registryHolder.addJars(newJars);
-    assertEquals("Version number should match", 2, registryHolder.getVersion());
+    registryHolder.addJars(newJars, ++expectedVersion);
+    assertEquals("Version number should match", expectedVersion, registryHolder.getVersion());
     compareTwoLists(jars, registryHolder.getAllJarNames());
     assertEquals(functionsSize, registryHolder.functionsSize());
     compareListMultimaps(functionsWithHolders, registryHolder.getAllFunctionsWithHolders());
@@ -252,8 +253,8 @@ public class FunctionRegistryHolderTest {
     registryHolder = new FunctionRegistryHolder();
   }
 
-  private void fillInRegistry() {
-    registryHolder.addJars(newJars);
+  private void fillInRegistry(long version) {
+    registryHolder.addJars(newJars, version);
   }
 
   private <T> void compareListMultimaps(ListMultimap<String, T> lm1, ListMultimap<String, T> lm2) {
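
For context, the versioned contract these tests now exercise can be sketched
as below. Only methods visible in this diff are used; "someJars" stands in
for a prepared Map<String, List<FunctionHolder>> and is illustrative only.

    FunctionRegistryHolder holder = new FunctionRegistryHolder();
    long version = 0;

    holder.addJars(someJars, ++version);   // the caller drives the version
    assert holder.getVersion() == version; // holder reports the supplied value

    holder.removeJar("some.jar");          // removal does not advance the
    assert holder.getVersion() == version; // externally managed version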

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/test/java/org/apache/drill/exec/record/ExpressionTreeMaterializerTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/record/ExpressionTreeMaterializerTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/record/ExpressionTreeMaterializerTest.java
index 8b338af..2847696 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/record/ExpressionTreeMaterializerTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/record/ExpressionTreeMaterializerTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -45,7 +45,6 @@ import org.apache.drill.exec.exception.SchemaChangeException;
 import org.apache.drill.exec.expr.ExpressionTreeMaterializer;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.expr.fn.registry.RemoteFunctionRegistry;
-import org.apache.drill.exec.proto.UserBitShared.Registry;
 import org.junit.Test;
 
 import com.google.common.collect.ImmutableList;
@@ -202,8 +201,8 @@ public class ExpressionTreeMaterializerTest extends ExecTest {
 
     new MockUp<RemoteFunctionRegistry>() {
       @Mock
-      Registry getRegistry() {
-        return Registry.getDefaultInstance();
+      long getRegistryVersion() {
+        return 0L;
       }
     };
 

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0-sources.jar
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0-sources.jar b/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0-sources.jar
new file mode 100644
index 0000000..f6b250e
Binary files /dev/null and b/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0-sources.jar differ

http://git-wip-us.apache.org/repos/asf/drill/blob/dcbcb94f/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0.jar
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0.jar b/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0.jar
new file mode 100644
index 0000000..4b5ef8b
Binary files /dev/null and b/exec/java-exec/src/test/resources/jars/DrillUDF-overloading-1.0.jar differ


[19/27] drill git commit: DRILL-5284: Roll-up of final fixes for managed sort

Posted by jn...@apache.org.
DRILL-5284: Roll-up of final fixes for managed sort

See subtasks for details.

* Provide detailed, accurate estimate of size consumed by a record batch
* Managed external sort spills too often with Parquet data
* Managed External Sort fails with OOM
* External sort refers to the deprecated HDFS fs.default.name param
* Config param drill.exec.sort.external.batch.size is not used
* NPE in managed external sort while spilling to disk
* External Sort BatchGroup leaks memory if an OOM occurs during read
* DRILL-5294: Under certain low-memory conditions, the sort must be forced to
merge two batches to make progress, even though the result is a bit larger
than comfortably fits into memory.

close #761


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/79811db5
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/79811db5
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/79811db5

Branch: refs/heads/master
Commit: 79811db5aa8c7f2cdbe6f74c0a40124bea9fb1fd
Parents: 69de3a1
Author: Paul Rogers <pr...@maprtech.com>
Authored: Fri Feb 24 10:31:25 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:34 2017 -0800

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |   3 +-
 .../impl/sort/SortRecordBatchBuilder.java       |   7 +-
 .../physical/impl/spill/RecordBatchSizer.java   |  97 ++-----
 .../exec/physical/impl/spill/SpillSet.java      |  52 +++-
 .../exec/physical/impl/xsort/BatchGroup.java    |   2 +-
 .../physical/impl/xsort/managed/BatchGroup.java |  30 +-
 .../impl/xsort/managed/ExternalSortBatch.java   | 291 ++++++++++++-------
 .../drill/exec/record/SimpleVectorWrapper.java  |  10 +
 .../codegen/templates/FixedValueVectors.java    | 149 +++++-----
 .../codegen/templates/NullableValueVectors.java |  30 +-
 .../src/main/codegen/templates/UnionVector.java |  16 +
 .../templates/VariableLengthVectors.java        |  19 ++
 .../drill/exec/vector/BaseDataValueVector.java  |   5 +
 .../org/apache/drill/exec/vector/BitVector.java |   6 +
 .../apache/drill/exec/vector/ObjectVector.java  |  12 +
 .../apache/drill/exec/vector/ValueVector.java   |  12 +
 .../drill/exec/vector/VariableWidthVector.java  |   4 +-
 .../apache/drill/exec/vector/ZeroVector.java    |  10 +
 .../exec/vector/complex/AbstractMapVector.java  |  22 +-
 .../vector/complex/BaseRepeatedValueVector.java |  12 +-
 .../drill/exec/vector/complex/ListVector.java   |  10 +
 .../exec/vector/complex/RepeatedListVector.java |  10 +
 .../exec/vector/complex/RepeatedMapVector.java  |   5 +
 23 files changed, 525 insertions(+), 289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 460702a..60d6265 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -66,7 +66,6 @@ public interface ExecConstants {
 
   // External Sort Boot configuration
 
-  String EXTERNAL_SORT_TARGET_BATCH_SIZE = "drill.exec.sort.external.batch.size";
   String EXTERNAL_SORT_TARGET_SPILL_BATCH_SIZE = "drill.exec.sort.external.spill.batch.size";
   String EXTERNAL_SORT_SPILL_GROUP_SIZE = "drill.exec.sort.external.spill.group.size";
   String EXTERNAL_SORT_SPILL_THRESHOLD = "drill.exec.sort.external.spill.threshold";
@@ -79,6 +78,8 @@ public interface ExecConstants {
   String EXTERNAL_SORT_SPILL_BATCH_SIZE = "drill.exec.sort.external.spill.spill_batch_size";
   String EXTERNAL_SORT_MERGE_BATCH_SIZE = "drill.exec.sort.external.spill.merge_batch_size";
   String EXTERNAL_SORT_MAX_MEMORY = "drill.exec.sort.external.mem_limit";
+
+  // Used only by the "unmanaged" sort.
   String EXTERNAL_SORT_BATCH_LIMIT = "drill.exec.sort.external.batch_limit";
 
   // External Sort Runtime options

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
index 33338dd..d46990f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
@@ -238,14 +238,15 @@ public class SortRecordBatchBuilder implements AutoCloseable {
   }
 
   /**
-   * For given recordcount how muchmemory does SortRecordBatchBuilder needs for its own purpose. This is used in
+   * For a given record count, how much memory SortRecordBatchBuilder needs for its own purposes. This is used in
    * ExternalSortBatch to make decisions about whether to spill or not.
    *
    * @param recordCount
    * @return
    */
   public static long memoryNeeded(int recordCount) {
-    // We need 4 bytes (SV4) for each record.
-    return recordCount * 4;
+    // We need 4 bytes (SV4) for each record. Due to power-of-two allocations, the
+    // backing buffer might be twice this size.
+    return recordCount * 2 * 4;
   }
 }
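
A quick worked example of the doubled estimate above, with an illustrative
record count:

    long records = 100_000;
    long estimate = records * 2 * 4;  // 800,000 bytes reserved
    // Raw SV4 need: records * 4 = 400,000 bytes (~391 KiB). A power-of-two
    // allocator may back that with a 512 KiB buffer, so doubling keeps the
    // estimate a safe upper bound on actual memory consumed.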

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/RecordBatchSizer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/RecordBatchSizer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/RecordBatchSizer.java
index 05354e5..22b1b0e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/RecordBatchSizer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/RecordBatchSizer.java
@@ -27,14 +27,7 @@ import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.VectorAccessible;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.record.selection.SelectionVector2;
-import org.apache.drill.exec.vector.BaseDataValueVector;
-import org.apache.drill.exec.vector.FixedWidthVector;
-import org.apache.drill.exec.vector.NullableVarCharVector;
-import org.apache.drill.exec.vector.NullableVector;
 import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.VarCharVector;
-
-import io.netty.buffer.DrillBuf;
 
 /**
  * Given a record batch or vector container, determines the actual memory
@@ -42,7 +35,7 @@ import io.netty.buffer.DrillBuf;
  */
 
 public class RecordBatchSizer {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RecordBatchSizer.class);
+//  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RecordBatchSizer.class);
 
   /**
    * Column size information.
@@ -53,23 +46,22 @@ public class RecordBatchSizer {
     /**
      * Assumed size from Drill metadata.
      */
+
     public int stdSize;
+
     /**
      * Actual memory consumed by all the vectors associated with this column.
      */
+
     public int totalSize;
+
     /**
      * Actual average column width as determined from actual memory use. This
      * size is larger than the actual data size since this size includes per-
      * column overhead such as any unused vector space, etc.
      */
-    public int estSize;
 
-    /**
-     * The size of the data vector backing the column. Useful for detecting
-     * cases of possible direct memory fragmentation.
-     */
-    public int dataVectorSize;
+    public int estSize;
     public int capacity;
     public int density;
     public int dataSize;
@@ -86,26 +78,21 @@ public class RecordBatchSizer {
       if (rowCount == 0) {
         return;
       }
-      DrillBuf[] bufs = v.getBuffers(false);
-      for (DrillBuf buf : bufs) {
-        totalSize += buf.capacity();
-      }
+
+      // Total size taken by all vectors (and underlying buffers)
+      // associated with this vector.
+
+      totalSize = v.getAllocatedByteCount();
 
       // Capacity is the number of values that the vector could
       // contain. This is useful only for fixed-length vectors.
 
       capacity = v.getValueCapacity();
 
-      // Crude way to get the size of the buffer underlying simple (scalar) values.
-      // Ignores maps, lists and other esoterica. Uses a crude way to subtract out
-      // the null "bit" (really byte) buffer size for nullable vectors.
+      // The amount of memory consumed by the payload: the actual
+      // data stored in the vectors.
 
-      if (v instanceof BaseDataValueVector) {
-        dataVectorSize = totalSize;
-        if (v instanceof NullableVector) {
-          dataVectorSize -= bufs[0].getActualMemoryConsumed();
-        }
-      }
+      dataSize = v.getPayloadByteCount();
 
       // Determine "density" the number of rows compared to potential
       // capacity. Low-density batches occur at block boundaries, ends
@@ -113,26 +100,9 @@ public class RecordBatchSizer {
       // for Varchar columns because we don't know the actual number of
       // bytes consumed (that information is hidden behind the Varchar
       // implementation where we can't get at it.)
-      //
-      // A better solution is to have each vector do this calc rather
-      // than trying to do it generically, but that increases the code
-      // change footprint and slows the commit process.
-
-      if (v instanceof FixedWidthVector) {
-        dataSize = stdSize * rowCount;
-      } else if ( v instanceof VarCharVector ) {
-        VarCharVector vv = (VarCharVector) v;
-        dataSize = vv.getOffsetVector().getAccessor().get(rowCount);
-      } else if ( v instanceof NullableVarCharVector ) {
-        NullableVarCharVector vv = (NullableVarCharVector) v;
-        dataSize = vv.getValuesVector().getOffsetVector().getAccessor().get(rowCount);
-      } else {
-        dataSize = 0;
-      }
-      if (dataSize > 0) {
-        density = roundUp(dataSize * 100, dataVectorSize);
-        estSize = roundUp(dataSize, rowCount);
-      }
+
+      density = roundUp(dataSize * 100, totalSize);
+      estSize = roundUp(dataSize, rowCount);
     }
 
     @Override
@@ -145,8 +115,6 @@ public class RecordBatchSizer {
       buf.append(estSize);
       buf.append(", total size: ");
       buf.append(totalSize);
-      buf.append(", vector size: ");
-      buf.append(dataVectorSize);
       buf.append(", data size: ");
       buf.append(dataSize);
       buf.append(", row capacity: ");
@@ -187,10 +155,12 @@ public class RecordBatchSizer {
   private int sv2Size;
   private int avgDensity;
 
+  private int netBatchSize;
+
   public RecordBatchSizer(VectorAccessible va) {
     rowCount = va.getRecordCount();
     for (VectorWrapper<?> vw : va) {
-      measureField(vw);
+      measureColumn(vw);
     }
 
     if (rowCount > 0) {
@@ -201,8 +171,8 @@ public class RecordBatchSizer {
     if (hasSv2) {
       @SuppressWarnings("resource")
       SelectionVector2 sv2 = va.getSelectionVector2();
-      sv2Size = sv2.getBuffer().capacity();
-      grossRowWidth += sv2Size;
+      sv2Size = sv2.getBuffer(false).capacity();
+      grossRowWidth += sv2Size / rowCount;
       netRowWidth += 2;
     }
 
@@ -227,12 +197,13 @@ public class RecordBatchSizer {
     totalBatchSize += sv2Size;
   }
 
-  private void measureField(VectorWrapper<?> vw) {
+  private void measureColumn(VectorWrapper<?> vw) {
     ColumnSize colSize = new ColumnSize(vw);
     columnSizes.add(colSize);
 
     stdRowWidth += colSize.stdSize;
     totalBatchSize += colSize.totalSize;
+    netBatchSize += colSize.dataSize;
     netRowWidth += colSize.estSize;
   }
 
@@ -249,27 +220,11 @@ public class RecordBatchSizer {
   public int netRowWidth() { return netRowWidth; }
   public int actualSize() { return totalBatchSize; }
   public boolean hasSv2() { return hasSv2; }
-  public int getAvgDensity() { return avgDensity; }
+  public int avgDensity() { return avgDensity; }
+  public int netSize() { return netBatchSize; }
 
   public static final int MAX_VECTOR_SIZE = 16 * 1024 * 1024; // 16 MiB
 
-  /**
-   * Look for columns backed by vectors larger than the 16 MiB size
-   * employed by the Netty allocator. Such large blocks can lead to
-   * memory fragmentation and unexpected OOM errors.
-   * @return true if any column is oversized
-   */
-  public boolean checkOversizeCols() {
-    boolean hasOversize = false;
-    for (ColumnSize colSize : columnSizes) {
-      if ( colSize.dataVectorSize > MAX_VECTOR_SIZE) {
-        logger.warn( "Column is wider than 256 characters: OOM due to memory fragmentation is possible - " + colSize.metadata.getPath() );
-        hasOversize = true;
-      }
-    }
-    return hasOversize;
-  }
-
   @Override
   public String toString() {
     StringBuilder buf = new StringBuilder();
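
The per-column math above now derives everything from two vector-level
counters instead of per-type branches. A minimal sketch, assuming roundUp()
is the class's rounding-division helper:

    // For each column vector v in a batch of rowCount rows:
    int totalSize = v.getAllocatedByteCount();  // every buffer, incl. unused slack
    int dataSize  = v.getPayloadByteCount();    // bytes of actual row data
    int density   = roundUp(dataSize * 100, totalSize); // % of memory holding data
    int estSize   = roundUp(dataSize, rowCount);        // average bytes per value

Low-density batches (well below the best density seen so far) typically mark
block or file boundaries, which is why the sort discounts them when updating
its row-width estimates.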

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/SpillSet.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/SpillSet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/SpillSet.java
index 4615500..74e1fb5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/SpillSet.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/spill/SpillSet.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.drill.common.config.DrillConfig;
@@ -105,7 +106,7 @@ public class SpillSet {
 
     protected HadoopFileManager(String fsName) {
       Configuration conf = new Configuration();
-      conf.set("fs.default.name", fsName);
+      conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fsName);
       try {
         fs = FileSystem.get(conf);
       } catch (IOException e) {
@@ -169,6 +170,12 @@ public class SpillSet {
     }
   }
 
+  /**
+   * Wrapper around an input stream to collect the total bytes
+   * read through the stream for use in reporting performance
+   * metrics.
+   */
+
   public static class CountingInputStream extends InputStream
   {
     private InputStream in;
@@ -218,6 +225,12 @@ public class SpillSet {
     public long getCount() { return count; }
   }
 
+  /**
+   * Wrapper around an output stream to collect the total bytes
+   * written through the stream for use in reporting performance
+   * metrics.
+   */
+
   public static class CountingOutputStream extends OutputStream {
 
     private OutputStream out;
@@ -333,6 +346,7 @@ public class SpillSet {
    */
 
   private final String spillDirName;
+  private final String spillFileName;
 
   private int fileCount = 0;
 
@@ -343,8 +357,30 @@ public class SpillSet {
   private long writeBytes;
 
   public SpillSet(FragmentContext context, PhysicalOperator popConfig) {
+    this(context, popConfig, null, "spill");
+  }
+
+  public SpillSet(FragmentContext context, PhysicalOperator popConfig,
+                  String opName, String fileName) {
+    FragmentHandle handle = context.getHandle();
     DrillConfig config = context.getConfig();
-    dirs = Iterators.cycle(config.getStringList(ExecConstants.EXTERNAL_SORT_SPILL_DIRS));
+    spillFileName = fileName;
+    List<String> dirList = config.getStringList(ExecConstants.EXTERNAL_SORT_SPILL_DIRS);
+    dirs = Iterators.cycle(dirList);
+
+    // If more than one directory, semi-randomly choose an offset into
+    // the list to avoid overloading the first directory in the list.
+
+    if (dirList.size() > 1) {
+      int hash = handle.getQueryId().hashCode() +
+                 handle.getMajorFragmentId() +
+                 handle.getMinorFragmentId() +
+                 popConfig.getOperatorId();
+      int offset = hash % dirList.size();
+      for (int i = 0; i < offset; i++) {
+        dirs.next();
+      }
+    }
 
     // Use the high-performance local file system if the local file
     // system is selected and impersonation is off. (We use that
@@ -357,9 +393,13 @@ public class SpillSet {
     } else {
       fileManager = new HadoopFileManager(spillFs);
     }
-    FragmentHandle handle = context.getHandle();
-    spillDirName = String.format("%s_major%s_minor%s_op%s", QueryIdHelper.getQueryId(handle.getQueryId()),
-        handle.getMajorFragmentId(), handle.getMinorFragmentId(), popConfig.getOperatorId());
+    spillDirName = String.format(
+        "%s_major%d_minor%d_op%d%s",
+        QueryIdHelper.getQueryId(handle.getQueryId()),
+        handle.getMajorFragmentId(),
+        handle.getMinorFragmentId(),
+        popConfig.getOperatorId(),
+        (opName == null) ? "" : "_" + opName);
   }
 
   public String getNextSpillFile() {
@@ -371,7 +411,7 @@ public class SpillSet {
     String spillDir = dirs.next();
     String currSpillPath = Joiner.on("/").join(spillDir, spillDirName);
     currSpillDirs.add(currSpillPath);
-    String outputFile = Joiner.on("/").join(currSpillPath, "spill" + ++fileCount);
+    String outputFile = Joiner.on("/").join(currSpillPath, spillFileName + ++fileCount);
     try {
         fileManager.deleteOnExit(currSpillPath);
     } catch (IOException e) {
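
The directory-offset logic added above can be read in isolation as the sketch
below; the hash argument stands in for the FragmentHandle and operator fields
combined in SpillSet, and the method name is hypothetical.

    import java.util.Iterator;
    import java.util.List;
    import com.google.common.collect.Iterators;

    // Start the cycling directory iterator at a pseudo-random position so
    // concurrent sorts do not all write to the first configured directory.
    static Iterator<String> offsetDirs(List<String> dirList, int fragmentHash) {
      Iterator<String> dirs = Iterators.cycle(dirList);
      if (dirList.size() > 1) {
        int offset = fragmentHash % dirList.size(); // may be <= 0 for a negative
        for (int i = 0; i < offset; i++) {          // hash, in which case no
          dirs.next();                              // offset is applied
        }
      }
      return dirs;
    }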

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/BatchGroup.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/BatchGroup.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/BatchGroup.java
index 0a818ee..13f0dbe 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/BatchGroup.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/BatchGroup.java
@@ -113,7 +113,7 @@ public class BatchGroup implements VectorAccessible, AutoCloseable {
     if (schema != null) {
       c = SchemaUtil.coerceContainer(c, schema, context);
     }
-//    logger.debug("Took {} us to read {} records", watch.elapsed(TimeUnit.MICROSECONDS), c.getRecordCount());
+    logger.trace("Took {} us to read {} records", watch.elapsed(TimeUnit.MICROSECONDS), c.getRecordCount());
     spilledBatches--;
     currentContainer.zeroVectors();
     Iterator<VectorWrapper<?>> wrapperIterator = c.iterator();

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/BatchGroup.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/BatchGroup.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/BatchGroup.java
index cd5cd1f..7ea599c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/BatchGroup.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/BatchGroup.java
@@ -75,17 +75,21 @@ public abstract class BatchGroup implements VectorAccessible, AutoCloseable {
    */
 
   public static class InputBatch extends BatchGroup {
-    private SelectionVector2 sv2;
+    private final SelectionVector2 sv2;
+    private final int dataSize;
 
-    public InputBatch(VectorContainer container, SelectionVector2 sv2, OperatorContext context, long batchSize) {
-      super(container, context, batchSize);
+    public InputBatch(VectorContainer container, SelectionVector2 sv2, OperatorContext context, int dataSize) {
+      super(container, context);
       this.sv2 = sv2;
+      this.dataSize = dataSize;
     }
 
     public SelectionVector2 getSv2() {
       return sv2;
     }
 
+    public int getDataSize() { return dataSize; }
+
     @Override
     public int getRecordCount() {
       if (sv2 != null) {
@@ -148,8 +152,8 @@ public abstract class BatchGroup implements VectorAccessible, AutoCloseable {
     private BufferAllocator allocator;
     private int spilledBatches = 0;
 
-    public SpilledRun(SpillSet spillSet, String path, OperatorContext context, long batchSize) throws IOException {
-      super(null, context, batchSize);
+    public SpilledRun(SpillSet spillSet, String path, OperatorContext context) throws IOException {
+      super(null, context);
       this.spillSet = spillSet;
       this.path = path;
       this.allocator = context.getAllocator();
@@ -275,25 +279,23 @@ public abstract class BatchGroup implements VectorAccessible, AutoCloseable {
       if (outputStream == null) {
         return 0;
       }
-      long posn = spillSet.getPosition(outputStream);
-      spillSet.tallyWriteBytes(posn);
+      long writeSize = spillSet.getPosition(outputStream);
+      spillSet.tallyWriteBytes(writeSize);
       outputStream.close();
       outputStream = null;
-      logger.trace("Summary: Wrote {} bytes to {}", posn, path);
-      return posn;
+      logger.trace("Summary: Wrote {} bytes to {}", writeSize, path);
+      return writeSize;
     }
   }
 
   protected VectorContainer currentContainer;
   protected int pointer = 0;
-  protected OperatorContext context;
+  protected final OperatorContext context;
   protected BatchSchema schema;
-  protected long dataSize;
 
-  public BatchGroup(VectorContainer container, OperatorContext context, long dataSize) {
+  public BatchGroup(VectorContainer container, OperatorContext context) {
     this.currentContainer = container;
     this.context = context;
-    this.dataSize = dataSize;
   }
 
   /**
@@ -348,8 +350,6 @@ public abstract class BatchGroup implements VectorAccessible, AutoCloseable {
     return currentContainer.getRecordCount();
   }
 
-  public long getDataSize() { return dataSize; }
-
   @Override
   public Iterator<VectorWrapper<?>> iterator() {
     return currentContainer.iterator();

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/ExternalSortBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/ExternalSortBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/ExternalSortBatch.java
index 783865c..a1162a0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/ExternalSortBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/managed/ExternalSortBatch.java
@@ -200,6 +200,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
   public static final String INTERRUPTION_AFTER_SORT = "after-sort";
   public static final String INTERRUPTION_AFTER_SETUP = "after-setup";
   public static final String INTERRUPTION_WHILE_SPILLING = "spilling";
+  public static final String INTERRUPTION_WHILE_MERGING = "merging";
   public static final long DEFAULT_SPILL_BATCH_SIZE = 8L * 1024 * 1024;
   public static final long MIN_SPILL_BATCH_SIZE = 256 * 1024;
 
@@ -219,6 +220,11 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
 
   private BatchSchema schema;
 
+  /**
+   * Incoming batches buffered in memory prior to spilling
+   * or an in-memory merge.
+   */
+
   private LinkedList<BatchGroup.InputBatch> bufferedBatches = Lists.newLinkedList();
   private LinkedList<BatchGroup.SpilledRun> spilledRuns = Lists.newLinkedList();
   private SelectionVector4 sv4;
@@ -231,6 +237,12 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
   private int mergeBatchRowCount;
   private int peakNumBatches = -1;
 
+  /**
+   * Maximum memory this operator may use. Usually comes from the
+   * operator definition, but may be overridden by a configuration
+   * parameter for unit testing.
+   */
+
   private long memoryLimit;
 
   /**
@@ -280,28 +292,65 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
   private long estimatedInputBatchSize;
 
   /**
-   * Maximum number of batches to hold in memory.
-   * (Primarily for testing.)
+   * Maximum number of spilled runs that can be merged in a single pass.
    */
 
-  private int bufferedBatchLimit;
   private int mergeLimit;
+
+  /**
+   * Target size of the first-generation spill files.
+   */
   private long spillFileSize;
+
+  /**
+   * Tracks the minimum amount of remaining memory for use
+   * in populating an operator metric.
+   */
+
   private long minimumBufferSpace;
 
   /**
-   * Minimum memory level before spilling occurs. That is, we can buffer input
-   * batches in memory until we are down to the level given by the spill point.
+   * Maximum memory level before spilling occurs. That is, we can buffer input
+   * batches in memory until we reach the level given by the buffer memory pool.
+   */
+
+  private long bufferMemoryPool;
+
+  /**
+   * Maximum memory that can hold batches during the merge
+   * phase.
    */
 
-  private long spillPoint;
   private long mergeMemoryPool;
+
+  /**
+   * The target size for merge batches sent downstream.
+   */
+
   private long preferredMergeBatchSize;
-  private long bufferMemoryPool;
-  private boolean hasOversizeCols;
+
+  /**
+   * Sum of the total number of bytes read from upstream.
+   * This is the raw memory bytes, not actual data bytes.
+   */
+
   private long totalInputBytes;
-  private Long spillBatchSize;
+
+  /**
+   * The configured size for each spill batch.
+   */
+  private Long preferredSpillBatchSize;
+
+  /**
+   * Tracks the maximum density of input batches. Density is
+   * the amount of actual data / amount of memory consumed.
+   * Low density batches indicate an EOF or something wrong in
+   * an upstream operator because a low-density batch wastes
+   * memory.
+   */
+
   private int maxDensity;
+  private int lastDensity = -1;
 
   /**
    * Estimated number of rows that fit into a single spill batch.
@@ -309,6 +358,13 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
 
   private int spillBatchRowCount;
 
+  /**
+   * The estimated actual spill batch size which depends on the
+   * details of the data rows for any particular query.
+   */
+
+  private int targetSpillBatchSize;
+
   // WARNING: The enum here is used within this class. But, the members of
   // this enum MUST match those in the (unmanaged) ExternalSortBatch since
   // that is the enum used in the UI to display metrics for the query profile.
@@ -349,7 +405,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     allocator = oContext.getAllocator();
     opCodeGen = new OperatorCodeGenerator(context, popConfig);
 
-    spillSet = new SpillSet(context, popConfig);
+    spillSet = new SpillSet(context, popConfig, "sort", "run");
     copierHolder = new CopierHolder(context, allocator, opCodeGen);
     configure(context.getConfig());
   }
@@ -368,12 +424,6 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
       memoryLimit = Math.min(memoryLimit, configLimit);
     }
 
-    // Optional limit on the number of buffered in-memory batches.
-    // 0 means no limit. Used primarily for testing. Must allow at least two
-    // batches or no merging can occur.
-
-    bufferedBatchLimit = getConfigLimit(config, ExecConstants.EXTERNAL_SORT_BATCH_LIMIT, Integer.MAX_VALUE, 2);
-
     // Optional limit on the number of spilled runs to merge in a single
     // pass. Limits the number of open file handles. Must allow at least
     // two batches to merge to make progress.
@@ -392,8 +442,17 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // Set too large and the ratio between memory and input data sizes becomes
     // small. Set too small and disk seek times dominate performance.
 
-    spillBatchSize = config.getBytes(ExecConstants.EXTERNAL_SORT_SPILL_BATCH_SIZE);
-    spillBatchSize = Math.max(spillBatchSize, MIN_SPILL_BATCH_SIZE);
+    preferredSpillBatchSize = config.getBytes(ExecConstants.EXTERNAL_SORT_SPILL_BATCH_SIZE);
+
+    // In low memory, use no more than 1/4 of memory for each spill batch. Ensures we
+    // can merge.
+
+    preferredSpillBatchSize = Math.min(preferredSpillBatchSize, memoryLimit / 4);
+
+    // But, the spill batch should be above some minimum size to prevent complete
+    // thrashing.
+
+    preferredSpillBatchSize = Math.max(preferredSpillBatchSize, MIN_SPILL_BATCH_SIZE);
 
     // Set the target output batch size. Use the maximum size, but only if
     // this represents less than 10% of available memory. Otherwise, use 10%
@@ -401,13 +460,13 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // output batch can contain no fewer than a single record.
 
     preferredMergeBatchSize = config.getBytes(ExecConstants.EXTERNAL_SORT_MERGE_BATCH_SIZE);
-    long maxAllowance = (long) (memoryLimit * MERGE_BATCH_ALLOWANCE);
+    long maxAllowance = (long) (memoryLimit - 2 * preferredSpillBatchSize);
     preferredMergeBatchSize = Math.min(maxAllowance, preferredMergeBatchSize);
     preferredMergeBatchSize = Math.max(preferredMergeBatchSize, MIN_MERGED_BATCH_SIZE);
 
-    logger.debug("Config: memory limit = {}, batch limit = {}, " +
-                 "spill file size = {}, batch size = {}, merge limit = {}, merge batch size = {}",
-                  memoryLimit, bufferedBatchLimit, spillFileSize, spillBatchSize, mergeLimit,
+    logger.debug("Config: memory limit = {}, " +
+                 "spill file size = {}, spill batch size = {}, merge limit = {}, merge batch size = {}",
+                  memoryLimit, spillFileSize, preferredSpillBatchSize, mergeLimit,
                   preferredMergeBatchSize);
   }
 
@@ -513,11 +572,21 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
 
   private IterOutcome nextOutputBatch() {
     if (resultsIterator.next()) {
+      injector.injectUnchecked(context.getExecutionControls(), INTERRUPTION_WHILE_MERGING);
       return IterOutcome.OK;
     } else {
       logger.trace("Deliver phase complete: Returned {} batches, {} records",
                     resultsIterator.getBatchCount(), resultsIterator.getRecordCount());
       sortState = SortState.DONE;
+
+      // Close the iterator here to release any remaining resources such
+      // as spill files. This is important when a query has a join: the
+      // first branch sort may complete before the second branch starts;
+      // it may be quite a while after returning the last row before the
+      // fragment executor calls this operator's close method.
+
+      resultsIterator.close();
+      resultsIterator = null;
       return IterOutcome.NONE;
     }
   }
@@ -561,11 +630,11 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
       // out of memory and that no work as in-flight and thus abandoned.
       // Consider removing this case once resource management is in place.
 
-      logger.debug("received OUT_OF_MEMORY, trying to spill");
+      logger.error("received OUT_OF_MEMORY, trying to spill");
       if (bufferedBatches.size() > 2) {
         spillFromMemory();
       } else {
-        logger.debug("not enough batches to spill, sending OUT_OF_MEMORY downstream");
+        logger.error("not enough batches to spill, sending OUT_OF_MEMORY downstream");
         return IterOutcome.OUT_OF_MEMORY;
       }
       break;
@@ -693,9 +762,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // Coerce all existing batches to the new schema.
 
     for (BatchGroup b : bufferedBatches) {
-//      System.out.println("Before: " + allocator.getAllocatedMemory()); // Debug only
       b.setSchema(schema);
-//      System.out.println("After: " + allocator.getAllocatedMemory()); // Debug only
     }
     for (BatchGroup b : spilledRuns) {
       b.setSchema(schema);
@@ -765,12 +832,12 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
       spillFromMemory();
     }
 
-    // Sanity check. We should now be above the spill point.
+    // Sanity check. We should now be below the buffer memory maximum.
 
     long startMem = allocator.getAllocatedMemory();
-    if (memoryLimit - startMem < spillPoint) {
-      logger.error( "ERROR: Failed to spill below the spill point. Spill point = {}, free memory = {}",
-                    spillPoint, memoryLimit - startMem);
+    if (startMem > bufferMemoryPool) {
+      logger.error( "ERROR: Failed to spill above buffer limit. Buffer pool = {}, memory = {}",
+          bufferMemoryPool, startMem);
     }
 
     // Convert the incoming batch to the agreed-upon schema.
@@ -835,7 +902,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     RecordBatchData rbd = new RecordBatchData(convertedBatch, allocator);
     try {
       rbd.setSv2(sv2);
-      bufferedBatches.add(new BatchGroup.InputBatch(rbd.getContainer(), rbd.getSv2(), oContext, batchSize));
+      bufferedBatches.add(new BatchGroup.InputBatch(rbd.getContainer(), rbd.getSv2(), oContext, sizer.netSize()));
       if (peakNumBatches < bufferedBatches.size()) {
         peakNumBatches = bufferedBatches.size();
         stats.setLongStat(Metric.PEAK_BATCHES_IN_MEMORY, peakNumBatches);
@@ -857,9 +924,6 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
   private RecordBatchSizer analyzeIncomingBatch() {
     RecordBatchSizer sizer = new RecordBatchSizer(incoming);
     sizer.applySv2();
-    if (! hasOversizeCols) {
-      hasOversizeCols = sizer.checkOversizeCols();
-    }
     if (inputBatchCount == 0) {
       logger.debug("{}", sizer.toString());
     }
@@ -887,7 +951,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     long actualBatchSize = sizer.actualSize();
     int actualRecordCount = sizer.rowCount();
 
-    if (actualBatchSize < memoryDelta) {
+    if (actualBatchSize != memoryDelta) {
       logger.debug("Memory delta: {}, actual batch size: {}, Diff: {}",
                    memoryDelta, actualBatchSize, memoryDelta - actualBatchSize);
     }
@@ -905,11 +969,12 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // We actually track the max density seen, and compare to 75% of that since
     // Parquet produces very low density record batches.
 
-    if (sizer.getAvgDensity() < maxDensity * 0.75) {
-      logger.debug("Saw low density batch. Density: {}", sizer.getAvgDensity());
+    if (sizer.avgDensity() < maxDensity * 3 / 4 && sizer.avgDensity() != lastDensity) {
+      logger.trace("Saw low density batch. Density: {}", sizer.avgDensity());
+      lastDensity = sizer.avgDensity();
       return;
     }
-    maxDensity = Math.max(maxDensity, sizer.getAvgDensity());
+    maxDensity = Math.max(maxDensity, sizer.avgDensity());
 
     // We know the batch size and number of records. Use that to estimate
     // the average record size. Since a typical batch has many records,
@@ -934,6 +999,14 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     long origInputBatchSize = estimatedInputBatchSize;
     estimatedInputBatchSize = Math.max(estimatedInputBatchSize, actualBatchSize);
 
+    // The row width may end up as zero if all fields are nulls or some
+    // other unusual situation. In this case, assume a width of 10 just
+    // to avoid lots of special case code.
+
+    if (estimatedRowWidth == 0) {
+      estimatedRowWidth = 10;
+    }
+
     // Go no further if nothing changed.
 
     if (estimatedRowWidth == origRowEstimate && estimatedInputBatchSize == origInputBatchSize) {
@@ -948,16 +1021,23 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // spill batches of either 64K records, or as many records as fit into the
     // amount of memory dedicated to each spill batch, whichever is less.
 
-    spillBatchRowCount = (int) Math.max(1, spillBatchSize / estimatedRowWidth);
+    spillBatchRowCount = (int) Math.max(1, preferredSpillBatchSize / estimatedRowWidth / 2);
     spillBatchRowCount = Math.min(spillBatchRowCount, Character.MAX_VALUE);
 
+    // Compute the actual spill batch size which may be larger or smaller
+    // than the preferred size depending on the row width. Double the estimated
+    // memory needs to allow for power-of-two rounding.
+
+    targetSpillBatchSize = spillBatchRowCount * estimatedRowWidth * 2;
+
     // Determine the number of records per batch per merge step. The goal is to
     // merge batches of either 64K records, or as many records as fit into the
     // amount of memory dedicated to each merge batch, whichever is less.
 
-    targetMergeBatchSize = preferredMergeBatchSize;
-    mergeBatchRowCount = (int) Math.max(1, targetMergeBatchSize / estimatedRowWidth);
+    mergeBatchRowCount = (int) Math.max(1, preferredMergeBatchSize / estimatedRowWidth / 2);
     mergeBatchRowCount = Math.min(mergeBatchRowCount, Character.MAX_VALUE);
+    mergeBatchRowCount = Math.max(1,  mergeBatchRowCount);
+    targetMergeBatchSize = mergeBatchRowCount * estimatedRowWidth * 2;
 
     // Determine the minimum memory needed for spilling. Spilling is done just
     // before accepting a batch, so we must spill if we don't have room for a
@@ -965,33 +1045,27 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // by merging the batches already in memory. Double this to allow for power-of-two
     // memory allocations.
 
-    spillPoint = estimatedInputBatchSize + 2 * spillBatchSize;
+    long spillPoint = estimatedInputBatchSize + 2 * targetSpillBatchSize;
 
     // The merge memory pool assumes we can spill all input batches. To make
     // progress, we must have at least two merge batches (same size as an output
     // batch) and one output batch. Again, double to allow for power-of-two
     // allocation and add one for a margin of error.
 
-    int minMergeBatches = 2 * 3 + 1;
-    long minMergeMemory = minMergeBatches * targetMergeBatchSize;
+    long minMergeMemory = 2 * targetSpillBatchSize + targetMergeBatchSize;
 
     // If we are in a low-memory condition, then we might not have room for the
     // default output batch size. In that case, pick a smaller size.
 
-    long minMemory = Math.max(spillPoint, minMergeMemory);
-    if (minMemory > memoryLimit) {
+    if (minMergeMemory > memoryLimit) {
 
-      // Figure out the minimum output batch size based on memory, but can't be
-      // any smaller than the defined minimum.
+      // Figure out the minimum output batch size based on memory,
+      // must hold at least one complete row.
 
-      targetMergeBatchSize = Math.max(MIN_MERGED_BATCH_SIZE, memoryLimit / minMergeBatches);
-
-      // Regardless of anything else, the batch must hold at least one
-      // complete row.
-
-      targetMergeBatchSize = Math.max(estimatedRowWidth, targetMergeBatchSize);
-      spillPoint = estimatedInputBatchSize + 2 * spillBatchSize;
-      minMergeMemory = minMergeBatches * targetMergeBatchSize;
+      long mergeAllowance = memoryLimit - 2 * targetSpillBatchSize;
+      targetMergeBatchSize = Math.max(estimatedRowWidth, mergeAllowance / 2);
+      mergeBatchRowCount = (int) (targetMergeBatchSize / estimatedRowWidth / 2);
+      minMergeMemory = 2 * targetSpillBatchSize + targetMergeBatchSize;
     }
 
     // Determine the minimum total memory we would need to receive two input
@@ -1004,7 +1078,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // runs when reading from disk.
 
     bufferMemoryPool = memoryLimit - spillPoint;
-    mergeMemoryPool = Math.max(minMergeMemory,
+    mergeMemoryPool = Math.max(memoryLimit - minMergeMemory,
                                (long) ((memoryLimit - 3 * targetMergeBatchSize) * 0.95));
 
     // Sanity check: if we've been given too little memory to make progress,
@@ -1021,14 +1095,14 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // Log the calculated values. Turn this on if things seem amiss.
     // Message will appear only when the values change.
 
-    logger.debug("Memory Estimates: record size = {} bytes; input batch = {} bytes, {} records; " +
-                  "merge batch size = {} bytes, {} records; " +
-                  "output batch size = {} bytes, {} records; " +
-                  "Available memory: {}, spill point = {}, min. merge memory = {}",
-                estimatedRowWidth, estimatedInputBatchSize, actualRecordCount,
-                spillBatchSize, spillBatchRowCount,
-                targetMergeBatchSize, mergeBatchRowCount,
-                memoryLimit, spillPoint, minMergeMemory);
+    logger.debug("Input Batch Estimates: record size = {} bytes; input batch = {} bytes, {} records",
+                 estimatedRowWidth, estimatedInputBatchSize, actualRecordCount);
+    logger.debug("Merge batch size = {} bytes, {} records; spill file size: {} bytes",
+                 targetSpillBatchSize, spillBatchRowCount, spillFileSize);
+    logger.debug("Output batch size = {} bytes, {} records",
+                 targetMergeBatchSize, mergeBatchRowCount);
+    logger.debug("Available memory: {}, buffer memory = {}, merge memory = {}",
+                 memoryLimit, bufferMemoryPool, mergeMemoryPool);
   }
 
   /**
@@ -1050,14 +1124,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // Must spill if we are below the spill point (the amount of memory
     // needed to do the minimal spill.)
 
-    if (allocator.getAllocatedMemory() + incomingSize >= bufferMemoryPool) {
-      return true; }
-
-    // For test purposes, configuration may have set a limit on the number of
-    // batches in memory. Spill if we exceed this limit. (By default the number
-    // of in-memory batches is unlimited.)
-
-    return bufferedBatches.size() > bufferedBatchLimit;
+    return allocator.getAllocatedMemory() + incomingSize >= bufferMemoryPool;
   }
 
   /**
@@ -1068,8 +1135,8 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
    */
 
   private IterOutcome sortInMemory() {
-    logger.info("Starting in-memory sort. Batches = {}, Records = {}, Memory = {}",
-                bufferedBatches.size(), inputRecordCount, allocator.getAllocatedMemory());
+    logger.debug("Starting in-memory sort. Batches = {}, Records = {}, Memory = {}",
+                 bufferedBatches.size(), inputRecordCount, allocator.getAllocatedMemory());
 
     // Note the difference between how we handle batches here and in the spill/merge
     // case. In the spill/merge case, this class decides on the batch size to send
@@ -1088,8 +1155,8 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
         sortState = SortState.DONE;
         return IterOutcome.STOP;
       } else {
-        logger.info("Completed in-memory sort. Memory = {}",
-                allocator.getAllocatedMemory());
+        logger.debug("Completed in-memory sort. Memory = {}",
+                     allocator.getAllocatedMemory());
         resultsIterator = memoryMerge;
         memoryMerge = null;
         sortState = SortState.DELIVER;
@@ -1111,9 +1178,9 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
    */
 
   private IterOutcome mergeSpilledRuns() {
-    logger.info("Starting consolidate phase. Batches = {}, Records = {}, Memory = {}, In-memory batches {}, spilled runs {}",
-                inputBatchCount, inputRecordCount, allocator.getAllocatedMemory(),
-                bufferedBatches.size(), spilledRuns.size());
+    logger.debug("Starting consolidate phase. Batches = {}, Records = {}, Memory = {}, In-memory batches {}, spilled runs {}",
+                 inputBatchCount, inputRecordCount, allocator.getAllocatedMemory(),
+                 bufferedBatches.size(), spilledRuns.size());
 
     // Consolidate batches to a number that can be merged in
     // a single last pass.
@@ -1132,7 +1199,8 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     allBatches.addAll(spilledRuns);
     spilledRuns.clear();
 
-    logger.info("Starting merge phase. Runs = {}, Alloc. memory = {}", allBatches.size(), allocator.getAllocatedMemory());
+    logger.debug("Starting merge phase. Runs = {}, Alloc. memory = {}",
+                 allBatches.size(), allocator.getAllocatedMemory());
 
     // Do the final merge as a results iterator.
 
@@ -1153,9 +1221,13 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
 
     // Can't merge more than will fit into memory at one time.
 
-    int maxMergeWidth = (int) (mergeMemoryPool / targetMergeBatchSize);
+    int maxMergeWidth = (int) (mergeMemoryPool / targetSpillBatchSize);
     maxMergeWidth = Math.min(mergeLimit, maxMergeWidth);
 
+    // But, must merge at least two batches.
+
+    maxMergeWidth = Math.max(maxMergeWidth, 2);
+
     // If we can't fit all batches in memory, must spill any in-memory
     // batches to make room for multiple spill-merge-spill cycles.
 
@@ -1177,7 +1249,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
       // is available, spill some in-memory batches.
 
       long allocated = allocator.getAllocatedMemory();
-      long totalNeeds = spilledRunsCount * targetMergeBatchSize + allocated;
+      long totalNeeds = spilledRunsCount * targetSpillBatchSize + allocated;
       if (totalNeeds > mergeMemoryPool) {
         spillFromMemory();
         return true;
@@ -1231,7 +1303,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
    * This method spills only half the accumulated batches
    * minimizing unnecessary disk writes. The exact count must lie between
    * the minimum and maximum spill counts.
-    */
+   */
 
   private void spillFromMemory() {
 
@@ -1239,30 +1311,29 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     // of the desired size. The actual file size might be a bit larger
     // or smaller than the target, which is expected.
 
-    long estSize = 0;
     int spillCount = 0;
+    long spillSize = 0;
     for (InputBatch batch : bufferedBatches) {
-      estSize += batch.getDataSize();
-      if (estSize > spillFileSize) {
-        break; }
+      long batchSize = batch.getDataSize();
+      spillSize += batchSize;
       spillCount++;
+      if (spillSize + batchSize / 2 > spillFileSize) {
+        break; }
     }
 
-    // Should not happen, but just to be sure...
+    // Must always spill at least 2, even if this creates an over-size
+    // spill file. But, if this is a final consolidation, we may have only
+    // a single batch.
 
-    if (spillCount == 0) {
-      return; }
+    spillCount = Math.max(spillCount, 2);
+    spillCount = Math.min(spillCount, bufferedBatches.size());
 
     // Do the actual spill.
 
-    logger.trace("Starting spill from memory. Memory = {}, Buffered batch count = {}, Spill batch count = {}",
-                 allocator.getAllocatedMemory(), bufferedBatches.size(), spillCount);
     mergeAndSpill(bufferedBatches, spillCount);
   }
 
   private void mergeAndSpill(LinkedList<? extends BatchGroup> source, int count) {
-    if (count == 0) {
-      return; }
     spilledRuns.add(doMergeAndSpill(source, count));
   }
 
@@ -1270,13 +1341,8 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     List<BatchGroup> batchesToSpill = Lists.newArrayList();
     spillCount = Math.min(batchGroups.size(), spillCount);
     assert spillCount > 0 : "Spill count to mergeAndSpill must not be zero";
-    long spillSize = 0;
     for (int i = 0; i < spillCount; i++) {
-      @SuppressWarnings("resource")
-      BatchGroup batch = batchGroups.pollFirst();
-      assert batch != null : "Encountered a null batch during merge and spill operation";
-      batchesToSpill.add(batch);
-      spillSize += batch.getDataSize();
+      batchesToSpill.add(batchGroups.pollFirst());
     }
 
     // Merge the selected set of matches and write them to the
@@ -1288,8 +1354,11 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     BatchGroup.SpilledRun newGroup = null;
     try (AutoCloseable ignored = AutoCloseables.all(batchesToSpill);
          CopierHolder.BatchMerger merger = copierHolder.startMerge(schema, batchesToSpill, spillBatchRowCount)) {
-      logger.trace("Merging and spilling to {}", outputFile);
-      newGroup = new BatchGroup.SpilledRun(spillSet, outputFile, oContext, spillSize);
+      logger.trace("Spilling {} of {} batches, {} rows, memory = {}, write to {}",
+                   batchesToSpill.size(), bufferedBatches.size() + batchesToSpill.size(),
+                   spillBatchRowCount,
+                   allocator.getAllocatedMemory(), outputFile);
+      newGroup = new BatchGroup.SpilledRun(spillSet, outputFile, oContext);
 
       // The copier will merge records from the buffered batches into
       // the outputContainer up to targetRecordCount number of rows.
@@ -1298,8 +1367,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
       while (merger.next()) {
 
         // Add a new batch of records (given by merger.getOutput()) to the spill
-        // file, opening the file if not yet open, and creating the target
-        // directory if it does not yet exist.
+        // file.
         //
         // note that addBatch also clears the merger's output container
 
@@ -1322,7 +1390,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
       // It will release the memory in the close() call.
 
       try {
-        // Rethrow so we can organize how to handle the error.
+        // Rethrow so we can decide how to handle the error.
 
         throw e;
       }
@@ -1444,11 +1512,12 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     } catch (RuntimeException e) {
       ex = (ex == null) ? e : ex;
     }
-    try {
-      allocator.close();
-    } catch (RuntimeException e) {
-      ex = (ex == null) ? e : ex;
-    }
+    // Note: allocator is closed by the FragmentManager
+//    try {
+//      allocator.close();
+//    } catch (RuntimeException e) {
+//      ex = (ex == null) ? e : ex;
+//    }
     if (ex != null) {
       throw ex;
     }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/java-exec/src/main/java/org/apache/drill/exec/record/SimpleVectorWrapper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SimpleVectorWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SimpleVectorWrapper.java
index 49562af..0a9f3d6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SimpleVectorWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SimpleVectorWrapper.java
@@ -78,6 +78,7 @@ public class SimpleVectorWrapper<T extends ValueVector> implements VectorWrapper
   }
 
 
+  @SuppressWarnings("resource")
   @Override
   public VectorWrapper<?> getChildWrapper(int[] ids) {
     if (ids.length == 1) {
@@ -108,4 +109,13 @@ public class SimpleVectorWrapper<T extends ValueVector> implements VectorWrapper
     vector.makeTransferPair(((SimpleVectorWrapper<?>)destination).vector).transfer();
   }
 
+  @Override
+  public String toString() {
+    if (vector == null) {
+      return "null";
+    } else {
+      return vector.toString();
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/codegen/templates/FixedValueVectors.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/codegen/templates/FixedValueVectors.java b/exec/vector/src/main/codegen/templates/FixedValueVectors.java
index eb0d616..b2a5dc3 100644
--- a/exec/vector/src/main/codegen/templates/FixedValueVectors.java
+++ b/exec/vector/src/main/codegen/templates/FixedValueVectors.java
@@ -69,7 +69,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
   @Override
   public int getValueCapacity(){
-    return (int) (data.capacity() *1.0 / ${type.width});
+    return data.capacity() / ${type.width};
   }
 
   @Override
@@ -196,7 +196,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
     data = buffer.slice(0, actualLength);
     data.retain(1);
     data.writerIndex(actualLength);
-    }
+  }
 
   public TransferPair getTransferPair(BufferAllocator allocator){
     return new TransferImpl(getField(), allocator);
@@ -227,6 +227,11 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
     target.data.writerIndex(sliceLength);
   }
 
+  @Override
+  public int getPayloadByteCount() {
+    return getAccessor().getValueCount() * ${type.width};
+  }
+
   private class TransferImpl implements TransferPair{
     private ${minor.class}Vector to;
 
@@ -390,7 +395,6 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
       return p.plusDays(days).plusMillis(millis);
     }
 
-
     public StringBuilder getAsStringBuilder(int index) {
       final int offsetIndex = index * ${type.width};
 
@@ -539,6 +543,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
     public ${friendlyType} getObject(int index) {
       return get(index);
     }
+
     public ${minor.javaType!type.javaType} getPrimitiveObject(int index) {
       return get(index);
     }
@@ -557,9 +562,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
       holder.isSet = 1;
       holder.value = data.get${(minor.javaType!type.javaType)?cap_first}(index * ${type.width});
     }
-
-
-   </#if> <#-- type.width -->
+    </#if> <#-- type.width -->
  }
 
  /**
@@ -728,84 +731,84 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
    }
 
    <#else> <#-- type.width <= 8 -->
-   public void set(int index, <#if (type.width >= 4)>${minor.javaType!type.javaType}<#else>int</#if> value) {
-     data.set${(minor.javaType!type.javaType)?cap_first}(index * ${type.width}, value);
-   }
+    public void set(int index, <#if (type.width >= 4)>${minor.javaType!type.javaType}<#else>int</#if> value) {
+      data.set${(minor.javaType!type.javaType)?cap_first}(index * ${type.width}, value);
+    }
 
    public void setSafe(int index, <#if (type.width >= 4)>${minor.javaType!type.javaType}<#else>int</#if> value) {
      while(index >= getValueCapacity()) {
-       reAlloc();
-     }
-     set(index, value);
-   }
-
-   protected void set(int index, ${minor.class}Holder holder){
-     data.set${(minor.javaType!type.javaType)?cap_first}(index * ${type.width}, holder.value);
-   }
+        reAlloc();
+      }
+      set(index, value);
+    }
 
-   public void setSafe(int index, ${minor.class}Holder holder){
-     while(index >= getValueCapacity()) {
-       reAlloc();
-     }
-     set(index, holder);
-   }
+    protected void set(int index, ${minor.class}Holder holder){
+      data.set${(minor.javaType!type.javaType)?cap_first}(index * ${type.width}, holder.value);
+    }
 
-   protected void set(int index, Nullable${minor.class}Holder holder){
-     data.set${(minor.javaType!type.javaType)?cap_first}(index * ${type.width}, holder.value);
-   }
+    public void setSafe(int index, ${minor.class}Holder holder){
+      while(index >= getValueCapacity()) {
+        reAlloc();
+      }
+      set(index, holder);
+    }
 
-   public void setSafe(int index, Nullable${minor.class}Holder holder){
-     while(index >= getValueCapacity()) {
-       reAlloc();
-     }
-     set(index, holder);
-   }
+    protected void set(int index, Nullable${minor.class}Holder holder){
+      data.set${(minor.javaType!type.javaType)?cap_first}(index * ${type.width}, holder.value);
+    }
 
-   @Override
-   public void generateTestData(int size) {
-     setValueCount(size);
-     boolean even = true;
-     final int valueCount = getAccessor().getValueCount();
-     for(int i = 0; i < valueCount; i++, even = !even) {
-       if(even){
-         set(i, ${minor.boxedType!type.boxedType}.MIN_VALUE);
-       }else{
-         set(i, ${minor.boxedType!type.boxedType}.MAX_VALUE);
-       }
-     }
-   }
+    public void setSafe(int index, Nullable${minor.class}Holder holder){
+      while(index >= getValueCapacity()) {
+        reAlloc();
+      }
+      set(index, holder);
+    }
 
-   public void generateTestDataAlt(int size) {
-     setValueCount(size);
-     boolean even = true;
-     final int valueCount = getAccessor().getValueCount();
-     for(int i = 0; i < valueCount; i++, even = !even) {
-       if(even){
-         set(i, (${(minor.javaType!type.javaType)}) 1);
-       }else{
-         set(i, (${(minor.javaType!type.javaType)}) 0);
-       }
-     }
-   }
+    @Override
+    public void generateTestData(int size) {
+      setValueCount(size);
+      boolean even = true;
+      final int valueCount = getAccessor().getValueCount();
+      for(int i = 0; i < valueCount; i++, even = !even) {
+        if(even) {
+          set(i, ${minor.boxedType!type.boxedType}.MIN_VALUE);
+        } else {
+          set(i, ${minor.boxedType!type.boxedType}.MAX_VALUE);
+        }
+      }
+    }
+
+    public void generateTestDataAlt(int size) {
+      setValueCount(size);
+      boolean even = true;
+      final int valueCount = getAccessor().getValueCount();
+      for(int i = 0; i < valueCount; i++, even = !even) {
+        if(even) {
+          set(i, (${(minor.javaType!type.javaType)}) 1);
+        } else {
+          set(i, (${(minor.javaType!type.javaType)}) 0);
+        }
+      }
+    }
 
   </#if> <#-- type.width -->
 
-   @Override
-   public void setValueCount(int valueCount) {
-     final int currentValueCapacity = getValueCapacity();
-     final int idx = (${type.width} * valueCount);
-     while(valueCount > getValueCapacity()) {
-       reAlloc();
-     }
-     if (valueCount > 0 && currentValueCapacity > valueCount * 2) {
-       incrementAllocationMonitor();
-     } else if (allocationMonitor > 0) {
-       allocationMonitor = 0;
-     }
-     VectorTrimmer.trim(data, idx);
-     data.writerIndex(valueCount * ${type.width});
-   }
- }
+    @Override
+    public void setValueCount(int valueCount) {
+      final int currentValueCapacity = getValueCapacity();
+      final int idx = (${type.width} * valueCount);
+      while(valueCount > getValueCapacity()) {
+        reAlloc();
+      }
+      if (valueCount > 0 && currentValueCapacity > valueCount * 2) {
+        incrementAllocationMonitor();
+      } else if (allocationMonitor > 0) {
+        allocationMonitor = 0;
+      }
+      VectorTrimmer.trim(data, idx);
+      data.writerIndex(valueCount * ${type.width});
+    }
+  }
 }
 
 </#if> <#-- type.major -->
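
As an aside on the setValueCount() body reindented above: the allocation-monitor logic is a small heuristic worth seeing in isolation. The sketch below models it with an illustrative class; the counter semantics mirror the template, but the class itself is hypothetical.

public class AllocationMonitorSketch {
  private int allocationMonitor = 0;

  void recordUsage(int valueCapacity, int valueCount) {
    if (valueCount > 0 && valueCapacity > valueCount * 2) {
      allocationMonitor++;        // vector is chronically under-filled
    } else if (allocationMonitor > 0) {
      allocationMonitor = 0;      // reset once usage catches up
    }
  }

  public static void main(String[] args) {
    AllocationMonitorSketch m = new AllocationMonitorSketch();
    m.recordUsage(4096, 1000);    // less than half full -> monitor = 1
    m.recordUsage(4096, 3000);    // more than half full -> monitor resets
    System.out.println(m.allocationMonitor); // prints 0
  }
}
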

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/codegen/templates/NullableValueVectors.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/codegen/templates/NullableValueVectors.java b/exec/vector/src/main/codegen/templates/NullableValueVectors.java
index 6c0a16b..b242728 100644
--- a/exec/vector/src/main/codegen/templates/NullableValueVectors.java
+++ b/exec/vector/src/main/codegen/templates/NullableValueVectors.java
@@ -45,12 +45,24 @@ package org.apache.drill.exec.vector;
  * NB: this class is automatically generated from ${.template_name} and ValueVectorTypes.tdd using FreeMarker.
  */
 @SuppressWarnings("unused")
-public final class ${className} extends BaseDataValueVector implements <#if type.major == "VarLen">VariableWidth<#else>FixedWidth</#if>Vector, NullableVector{
+public final class ${className} extends BaseDataValueVector implements <#if type.major == "VarLen">VariableWidth<#else>FixedWidth</#if>Vector, NullableVector {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(${className}.class);
 
   private final FieldReader reader = new Nullable${minor.class}ReaderImpl(Nullable${minor.class}Vector.this);
 
   private final MaterializedField bitsField = MaterializedField.create("$bits$", Types.required(MinorType.UINT1));
+
+  /**
+   * Per-value "set" flag. Meaning:
+   * <ul>
+   * <li>0: value is not set (value is null).</li>
+   * <li>1: value is set (value is not null).</li>
+   * </ul>
+   * That is, a 1 means that the values vector holds a value at that
+   * position; a 0 means the value is null. Thus, all values start as
+   * not set (null) and must be explicitly set (made not null).
+   */
+
   private final UInt1Vector bits = new UInt1Vector(bitsField, allocator);
   private final ${valuesName} values = new ${minor.class}Vector(field, allocator);
 
@@ -108,8 +120,8 @@ public final class ${className} extends BaseDataValueVector implements <#if type
       return 0;
     }
 
-    return values.getBufferSizeFor(valueCount)
-        + bits.getBufferSizeFor(valueCount);
+    return values.getBufferSizeFor(valueCount) +
+           bits.getBufferSizeFor(valueCount);
   }
 
   @Override
@@ -163,6 +175,18 @@ public final class ${className} extends BaseDataValueVector implements <#if type
     return success;
   }
 
+  @Override
+  public int getAllocatedByteCount() {
+    return bits.getAllocatedByteCount() + values.getAllocatedByteCount();
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    // For nullable, we include all values, null or not, in computing
+    // the value length.
+    return bits.getPayloadByteCount() + values.getPayloadByteCount();
+  }
+
   <#if type.major == "VarLen">
   @Override
   public void allocateNew(int totalBytes, int valueCount) {
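
The javadoc added above pairs a bits vector with a values vector. As a mental model only (not Drill's actual classes), nullable storage behaves like this sketch, where bits[i] is the "set" flag and values[i] is meaningful only when that flag is 1:

public class NullableIntSketch {
  private final byte[] bits;   // 0 = null (not set), 1 = set
  private final int[] values;  // payload; ignored where bits[i] == 0

  public NullableIntSketch(int capacity) {
    bits = new byte[capacity];   // all values start as null
    values = new int[capacity];
  }

  public void set(int index, int value) {
    bits[index] = 1;             // mark not-null
    values[index] = value;
  }

  public boolean isNull(int index) {
    return bits[index] == 0;
  }

  public static void main(String[] args) {
    NullableIntSketch v = new NullableIntSketch(4);
    v.set(1, 42);
    for (int i = 0; i < 4; i++) {
      System.out.println(i + ": " + (v.isNull(i) ? "null" : v.values[i]));
    }
  }
}
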

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/codegen/templates/UnionVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/codegen/templates/UnionVector.java b/exec/vector/src/main/codegen/templates/UnionVector.java
index f80bb25..93854e7 100644
--- a/exec/vector/src/main/codegen/templates/UnionVector.java
+++ b/exec/vector/src/main/codegen/templates/UnionVector.java
@@ -202,6 +202,22 @@ public class UnionVector implements ValueVector {
   }
 
   @Override
+  public int getAllocatedByteCount() {
+    // Most vectors are held inside the internal map.
+
+    int count = internalMap.getAllocatedByteCount();
+    if (bit != null) {
+      count += bit.getAllocatedByteCount();
+    }
+    return count;
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    return internalMap.getPayloadByteCount();
+  }
+
+  @Override
   public TransferPair getTransferPair(BufferAllocator allocator) {
     return new TransferImpl(field, allocator);
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/codegen/templates/VariableLengthVectors.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/codegen/templates/VariableLengthVectors.java b/exec/vector/src/main/codegen/templates/VariableLengthVectors.java
index 27432d2..ea3c9de 100644
--- a/exec/vector/src/main/codegen/templates/VariableLengthVectors.java
+++ b/exec/vector/src/main/codegen/templates/VariableLengthVectors.java
@@ -238,6 +238,25 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
     return true;
   }
 
+  @Override
+  public int getAllocatedByteCount() {
+    return offsetVector.getAllocatedByteCount() + super.getAllocatedByteCount();
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    UInt${type.width}Vector.Accessor a = offsetVector.getAccessor();
+    int count = a.getValueCount();
+    if (count == 0) {
+      return 0;
+    } else {
+      // With one or more values, the last offset entry holds the end
+      // position of the final value, which equals the total length of
+      // the stored data.
+      return a.get(count-1);
+    }
+  }
+
   private class TransferImpl implements TransferPair{
     ${minor.class}Vector to;
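
The offset-vector bookkeeping above is easiest to see with concrete numbers. A small sketch, assuming a hypothetical offsets array (in Drill the entries come from the offset vector's accessor):

public class VarWidthPayloadSketch {
  public static void main(String[] args) {
    // With n values, the offset vector holds n+1 entries: entry i is the
    // start of value i, and the final entry is the end of the last value.
    int[] offsets = {0, 3, 8, 8, 12};  // 4 values with lengths 3, 5, 0, 4
    int count = offsets.length;        // value count of the offset vector
    int payloadBytes = (count == 0) ? 0 : offsets[count - 1];
    System.out.println("payload = " + payloadBytes + " bytes"); // prints 12
  }
}
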
 

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/BaseDataValueVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/BaseDataValueVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/BaseDataValueVector.java
index f812209..4def5b8 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/BaseDataValueVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/BaseDataValueVector.java
@@ -87,4 +87,9 @@ public abstract class BaseDataValueVector extends BaseValueVector {
    * the value vector. The purpose is to move the value vector to a "mutate" state
    */
   public void reset() {}
+
+  @Override
+  public int getAllocatedByteCount() {
+    return data.capacity();
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/BitVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/BitVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/BitVector.java
index 2b22f52..a6c0cea 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/BitVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/BitVector.java
@@ -449,4 +449,10 @@ public final class BitVector extends BaseDataValueVector implements FixedWidthVe
     this.valueCount = 0;
     super.clear();
   }
+
+  @Override
+  public int getPayloadByteCount() {
+    // One byte per value
+    return valueCount;
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/ObjectVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/ObjectVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/ObjectVector.java
index 4479db0..f69dc98 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/ObjectVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/ObjectVector.java
@@ -218,4 +218,16 @@ public class ObjectVector extends BaseValueVector {
       holder.obj = getObject(index);
     }
   }
+
+  @Override
+  public int getAllocatedByteCount() {
+    // Object values live on the Java heap, not in direct memory.
+    return 0;
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    // Object values live on the Java heap, not in direct memory.
+    return 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/ValueVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/ValueVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/ValueVector.java
index 47cf143..f4c7935 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/ValueVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/ValueVector.java
@@ -176,6 +176,18 @@ public interface ValueVector extends Closeable, Iterable<ValueVector> {
   void load(SerializedField metadata, DrillBuf buffer);
 
   /**
+   * Return the total memory consumed by all buffers within this vector.
+   */
+
+  int getAllocatedByteCount();
+
+  /**
+   * Return the number of bytes consumed by actual data values, as
+   * opposed to allocated but unused buffer space.
+   */
+
+  int getPayloadByteCount();
+
+  /**
    * An abstraction that is used to read from this vector instance.
    */
   interface Accessor {
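
One plausible use of the two new accessors, sketched below: estimating how densely a batch of vectors fills its direct memory. The helper class is hypothetical, not part of this patch; it assumes only the two interface methods added here.

import org.apache.drill.exec.vector.ValueVector;

public class MemoryDensity {
  // Hypothetical helper: ratio of bytes holding real data to bytes
  // allocated, across any collection of vectors.
  public static double of(Iterable<ValueVector> vectors) {
    long allocated = 0;
    long payload = 0;
    for (ValueVector v : vectors) {
      allocated += v.getAllocatedByteCount();
      payload += v.getPayloadByteCount();
    }
    // 1.0 means every allocated byte holds data; low values suggest
    // over-allocation (power-of-two rounding, sparse batches, etc.).
    return allocated == 0 ? 1.0 : (double) payload / allocated;
  }
}
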

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/VariableWidthVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/VariableWidthVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/VariableWidthVector.java
index ee9c039..d04234c 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/VariableWidthVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/VariableWidthVector.java
@@ -17,9 +17,7 @@
  */
 package org.apache.drill.exec.vector;
 
-import io.netty.buffer.DrillBuf;
-
-public interface VariableWidthVector extends ValueVector{
+public interface VariableWidthVector extends ValueVector {
 
   /**
    * Allocate a new memory space for this vector.  Must be called prior to using the ValueVector.

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/ZeroVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/ZeroVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/ZeroVector.java
index 3f40d4c..9181f20 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/ZeroVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/ZeroVector.java
@@ -176,4 +176,14 @@ public class ZeroVector implements ValueVector {
 
   @Override
   public void load(UserBitShared.SerializedField metadata, DrillBuf buffer) { }
+
+  @Override
+  public int getAllocatedByteCount() {
+    return 0;
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    return 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/AbstractMapVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/AbstractMapVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/AbstractMapVector.java
index 08952ab..baba086 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/AbstractMapVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/AbstractMapVector.java
@@ -266,7 +266,7 @@ public abstract class AbstractMapVector extends AbstractContainerVector {
 
   @Override
   public int getBufferSize() {
-    int actualBufSize = 0 ;
+    int actualBufSize = 0;
 
     for (final ValueVector v : vectors.values()) {
       for (final DrillBuf buf : v.getBuffers(false)) {
@@ -275,4 +275,24 @@ public abstract class AbstractMapVector extends AbstractContainerVector {
     }
     return actualBufSize;
   }
+
+  @Override
+  public int getAllocatedByteCount() {
+    int count = 0;
+
+    for (final ValueVector v : vectors.values()) {
+      count += v.getAllocatedByteCount();
+    }
+    return count;
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    int count = 0;
+
+    for (final ValueVector v : vectors.values()) {
+      count += v.getPayloadByteCount();
+    }
+    return count;
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/BaseRepeatedValueVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/BaseRepeatedValueVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/BaseRepeatedValueVector.java
index bc90eda..1664b0a 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/BaseRepeatedValueVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/BaseRepeatedValueVector.java
@@ -209,6 +209,17 @@ public abstract class BaseRepeatedValueVector extends BaseValueVector implements
     vector = v;
   }
 
+
+  @Override
+  public int getAllocatedByteCount() {
+    return offsets.getAllocatedByteCount() + vector.getAllocatedByteCount();
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    return offsets.getPayloadByteCount() + vector.getPayloadByteCount();
+  }
+
   public abstract class BaseRepeatedAccessor extends BaseValueVector.BaseAccessor implements RepeatedAccessor {
 
     @Override
@@ -256,5 +267,4 @@ public abstract class BaseRepeatedValueVector extends BaseValueVector implements
       vector.getMutator().setValueCount(childValueCount);
     }
   }
-
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/ListVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/ListVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/ListVector.java
index 33d6ddc..f71baa7 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/ListVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/ListVector.java
@@ -317,4 +317,14 @@ public class ListVector extends BaseRepeatedValueVector {
       bits.getMutator().setValueCount(valueCount);
     }
   }
+
+  @Override
+  public int getAllocatedByteCount() {
+    return offsets.getAllocatedByteCount() + bits.getAllocatedByteCount() + super.getAllocatedByteCount();
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    return offsets.getPayloadByteCount() + bits.getPayloadByteCount() + super.getPayloadByteCount();
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedListVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedListVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedListVector.java
index 0cc3628..b5c97bf 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedListVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedListVector.java
@@ -426,4 +426,14 @@ public class RepeatedListVector extends AbstractContainerVector
   public void copyFromSafe(int fromIndex, int thisIndex, RepeatedListVector from) {
     delegate.copyFromSafe(fromIndex, thisIndex, from.delegate);
   }
+
+  @Override
+  public int getAllocatedByteCount() {
+    return delegate.getAllocatedByteCount();
+  }
+
+  @Override
+  public int getPayloadByteCount() {
+    return delegate.getPayloadByteCount();
+  }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/79811db5/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedMapVector.java
----------------------------------------------------------------------
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedMapVector.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedMapVector.java
index 94cf4a6..3707ff0 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedMapVector.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/complex/RepeatedMapVector.java
@@ -584,4 +584,9 @@ public class RepeatedMapVector extends AbstractMapVector
       vector.clear();
     }
   }
+
+  @Override
+  public int getAllocatedByteCount() {
+    return super.getAllocatedByteCount() + offsets.getAllocatedByteCount();
+  }
 }


[14/27] drill git commit: DRILL-5301: Server metadata API

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/d2e0f415/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
index dd8c684..4bb2de7 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserProtos.java
@@ -3603,6 +3603,806 @@ public final class SchemaUserProtos
         }
     }
 
+    public static final class GetServerMetaReq
+    {
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.MessageSchema WRITE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.MessageSchema();
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.BuilderSchema MERGE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.BuilderSchema();
+        
+        public static class MessageSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.GetServerMetaReq>
+        {
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.GetServerMetaReq message) throws java.io.IOException
+            {
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.GetServerMetaReq message)
+            {
+                return message.isInitialized();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.GetServerMetaReq> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.class.getName();
+            }
+            //unused
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.GetServerMetaReq message) throws java.io.IOException {}
+            public org.apache.drill.exec.proto.UserProtos.GetServerMetaReq newMessage() { return null; }
+        }
+        public static class BuilderSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.Builder>
+        {
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.Builder builder) throws java.io.IOException
+            {
+                for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this))
+                {
+                    switch(number)
+                    {
+                        case 0:
+                            return;
+                        default:
+                            input.handleUnknownField(number, this);
+                    }
+                }
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.Builder builder)
+            {
+                return builder.isInitialized();
+            }
+            public org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.Builder newMessage()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.newBuilder();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaReq.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.Builder> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.Builder.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.class.getName();
+            }
+            //unused
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.GetServerMetaReq.Builder builder) throws java.io.IOException {}
+        }
+        public static java.lang.String getFieldName(int number)
+        {
+            switch(number)
+            {
+                default: return null;
+            }
+        }
+        public static int getFieldNumber(java.lang.String name)
+        {
+            java.lang.Integer number = fieldMap.get(name);
+            return number == null ? 0 : number.intValue();
+        }
+        private static final java.util.HashMap<java.lang.String,java.lang.Integer> fieldMap = new java.util.HashMap<java.lang.String,java.lang.Integer>();
+        static
+        {
+        }
+    }
+
+    public static final class ConvertSupport
+    {
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.MessageSchema WRITE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.MessageSchema();
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.BuilderSchema MERGE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.BuilderSchema();
+        
+        public static class MessageSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.ConvertSupport>
+        {
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.ConvertSupport message) throws java.io.IOException
+            {
+                if(message.hasFrom())
+                    output.writeEnum(1, message.getFrom().getNumber(), false);
+                if(message.hasTo())
+                    output.writeEnum(2, message.getTo().getNumber(), false);
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.ConvertSupport message)
+            {
+                return message.isInitialized();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.ConvertSupport> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ConvertSupport.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ConvertSupport.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ConvertSupport.class.getName();
+            }
+            //unused
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.ConvertSupport message) throws java.io.IOException {}
+            public org.apache.drill.exec.proto.UserProtos.ConvertSupport newMessage() { return null; }
+        }
+        public static class BuilderSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.ConvertSupport.Builder>
+        {
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.ConvertSupport.Builder builder) throws java.io.IOException
+            {
+                for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this))
+                {
+                    switch(number)
+                    {
+                        case 0:
+                            return;
+                        case 1:
+                            builder.setFrom(org.apache.drill.common.types.TypeProtos.MinorType.valueOf(input.readEnum()));
+                            break;
+                        case 2:
+                            builder.setTo(org.apache.drill.common.types.TypeProtos.MinorType.valueOf(input.readEnum()));
+                            break;
+                        default:
+                            input.handleUnknownField(number, this);
+                    }
+                }
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.ConvertSupport.Builder builder)
+            {
+                return builder.isInitialized();
+            }
+            public org.apache.drill.exec.proto.UserProtos.ConvertSupport.Builder newMessage()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ConvertSupport.newBuilder();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.ConvertSupport.Builder> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ConvertSupport.Builder.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ConvertSupport.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ConvertSupport.class.getName();
+            }
+            //unused
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.ConvertSupport.Builder builder) throws java.io.IOException {}
+        }
+        public static java.lang.String getFieldName(int number)
+        {
+            switch(number)
+            {
+                case 1: return "from";
+                case 2: return "to";
+                default: return null;
+            }
+        }
+        public static int getFieldNumber(java.lang.String name)
+        {
+            java.lang.Integer number = fieldMap.get(name);
+            return number == null ? 0 : number.intValue();
+        }
+        private static final java.util.HashMap<java.lang.String,java.lang.Integer> fieldMap = new java.util.HashMap<java.lang.String,java.lang.Integer>();
+        static
+        {
+            fieldMap.put("from", 1);
+            fieldMap.put("to", 2);
+        }
+    }
+
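
For readers unfamiliar with these generated schemas: a typical round trip serializes a message through the MessageSchema (WRITE) and merges the bytes back through the BuilderSchema (MERGE) using protostuff's ProtostuffIOUtil. The sketch below is illustrative only, not part of this patch; the MinorType values are an arbitrary example pair.

import com.dyuproject.protostuff.LinkedBuffer;
import com.dyuproject.protostuff.ProtostuffIOUtil;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.proto.SchemaUserProtos;
import org.apache.drill.exec.proto.UserProtos;

public class ConvertSupportRoundTrip {
  public static void main(String[] args) {
    // Build a message; INT -> VARCHAR is an arbitrary example conversion.
    UserProtos.ConvertSupport cs = UserProtos.ConvertSupport.newBuilder()
        .setFrom(MinorType.INT)
        .setTo(MinorType.VARCHAR)
        .build();

    // Serialize with the MessageSchema (WRITE)...
    byte[] bytes = ProtostuffIOUtil.toByteArray(
        cs, SchemaUserProtos.ConvertSupport.WRITE, LinkedBuffer.allocate(256));

    // ...and merge back into a fresh Builder with the BuilderSchema (MERGE).
    UserProtos.ConvertSupport.Builder back =
        SchemaUserProtos.ConvertSupport.MERGE.newMessage();
    ProtostuffIOUtil.mergeFrom(bytes, back, SchemaUserProtos.ConvertSupport.MERGE);

    System.out.println(back.getFrom() + " -> " + back.getTo()); // INT -> VARCHAR
  }
}
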
+    public static final class GetServerMetaResp
+    {
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.MessageSchema WRITE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.MessageSchema();
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.BuilderSchema MERGE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.BuilderSchema();
+        
+        public static class MessageSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.GetServerMetaResp>
+        {
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.GetServerMetaResp message) throws java.io.IOException
+            {
+                if(message.hasStatus())
+                    output.writeEnum(1, message.getStatus().getNumber(), false);
+                if(message.hasServerMeta())
+                    output.writeObject(2, message.getServerMeta(), org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.WRITE, false);
+
+                if(message.hasError())
+                    output.writeObject(3, message.getError(), org.apache.drill.exec.proto.SchemaUserBitShared.DrillPBError.WRITE, false);
+
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.GetServerMetaResp message)
+            {
+                return message.isInitialized();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.GetServerMetaResp> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.class.getName();
+            }
+            //unused
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.GetServerMetaResp message) throws java.io.IOException {}
+            public org.apache.drill.exec.proto.UserProtos.GetServerMetaResp newMessage() { return null; }
+        }
+        public static class BuilderSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.Builder>
+        {
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.Builder builder) throws java.io.IOException
+            {
+                for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this))
+                {
+                    switch(number)
+                    {
+                        case 0:
+                            return;
+                        case 1:
+                            builder.setStatus(org.apache.drill.exec.proto.UserProtos.RequestStatus.valueOf(input.readEnum()));
+                            break;
+                        case 2:
+                            builder.setServerMeta(input.mergeObject(org.apache.drill.exec.proto.UserProtos.ServerMeta.newBuilder(), org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.MERGE));
+
+                            break;
+                        case 3:
+                            builder.setError(input.mergeObject(org.apache.drill.exec.proto.UserBitShared.DrillPBError.newBuilder(), org.apache.drill.exec.proto.SchemaUserBitShared.DrillPBError.MERGE));
+
+                            break;
+                        default:
+                            input.handleUnknownField(number, this);
+                    }
+                }
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.Builder builder)
+            {
+                return builder.isInitialized();
+            }
+            public org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.Builder newMessage()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.newBuilder();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.GetServerMetaResp.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.Builder> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.Builder.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.class.getName();
+            }
+            //unused
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.GetServerMetaResp.Builder builder) throws java.io.IOException {}
+        }
+        public static java.lang.String getFieldName(int number)
+        {
+            switch(number)
+            {
+                case 1: return "status";
+                case 2: return "serverMeta";
+                case 3: return "error";
+                default: return null;
+            }
+        }
+        public static int getFieldNumber(java.lang.String name)
+        {
+            java.lang.Integer number = fieldMap.get(name);
+            return number == null ? 0 : number.intValue();
+        }
+        private static final java.util.HashMap<java.lang.String,java.lang.Integer> fieldMap = new java.util.HashMap<java.lang.String,java.lang.Integer>();
+        static
+        {
+            fieldMap.put("status", 1);
+            fieldMap.put("serverMeta", 2);
+            fieldMap.put("error", 3);
+        }
+    }
+
+    public static final class ServerMeta
+    {
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.MessageSchema WRITE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.MessageSchema();
+        public static final org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.BuilderSchema MERGE =
+            new org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.BuilderSchema();
+        
+        public static class MessageSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.ServerMeta>
+        {
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.ServerMeta message) throws java.io.IOException
+            {
+                if(message.hasAllTablesSelectable())
+                    output.writeBool(1, message.getAllTablesSelectable(), false);
+                if(message.hasBlobIncludedInMaxRowSize())
+                    output.writeBool(2, message.getBlobIncludedInMaxRowSize(), false);
+                if(message.hasCatalogAtStart())
+                    output.writeBool(3, message.getCatalogAtStart(), false);
+                if(message.hasCatalogSeparator())
+                    output.writeString(4, message.getCatalogSeparator(), false);
+                if(message.hasCatalogTerm())
+                    output.writeString(5, message.getCatalogTerm(), false);
+                for(org.apache.drill.exec.proto.UserProtos.CollateSupport collateSupport : message.getCollateSupportList())
+                    output.writeEnum(6, collateSupport.getNumber(), true);
+                if(message.hasColumnAliasingSupported())
+                    output.writeBool(7, message.getColumnAliasingSupported(), false);
+                for(org.apache.drill.exec.proto.UserProtos.ConvertSupport convertSupport : message.getConvertSupportList())
+                    output.writeObject(8, convertSupport, org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.WRITE, true);
+
+                if(message.hasCorrelationNamesSupport())
+                    output.writeEnum(9, message.getCorrelationNamesSupport().getNumber(), false);
+                for(String dateTimeFunctions : message.getDateTimeFunctionsList())
+                    output.writeString(10, dateTimeFunctions, true);
+                for(org.apache.drill.exec.proto.UserProtos.DateTimeLiteralsSupport dateTimeLiteralsSupport : message.getDateTimeLiteralsSupportList())
+                    output.writeEnum(11, dateTimeLiteralsSupport.getNumber(), true);
+                if(message.hasGroupBySupport())
+                    output.writeEnum(12, message.getGroupBySupport().getNumber(), false);
+                if(message.hasIdentifierCasing())
+                    output.writeEnum(13, message.getIdentifierCasing().getNumber(), false);
+                if(message.hasIdentifierQuoteString())
+                    output.writeString(14, message.getIdentifierQuoteString(), false);
+                if(message.hasLikeEscapeClauseSupported())
+                    output.writeBool(15, message.getLikeEscapeClauseSupported(), false);
+                if(message.hasMaxBinaryLiteralLength())
+                    output.writeUInt32(16, message.getMaxBinaryLiteralLength(), false);
+                if(message.hasMaxCatalogNameLength())
+                    output.writeUInt32(17, message.getMaxCatalogNameLength(), false);
+                if(message.hasMaxCharLiteralLength())
+                    output.writeUInt32(18, message.getMaxCharLiteralLength(), false);
+                if(message.hasMaxColumnNameLength())
+                    output.writeUInt32(19, message.getMaxColumnNameLength(), false);
+                if(message.hasMaxColumnsInGroupBy())
+                    output.writeUInt32(20, message.getMaxColumnsInGroupBy(), false);
+                if(message.hasMaxColumnsInOrderBy())
+                    output.writeUInt32(21, message.getMaxColumnsInOrderBy(), false);
+                if(message.hasMaxColumnsInSelect())
+                    output.writeUInt32(22, message.getMaxColumnsInSelect(), false);
+                if(message.hasMaxCursorNameLength())
+                    output.writeUInt32(23, message.getMaxCursorNameLength(), false);
+                if(message.hasMaxLogicalLobSize())
+                    output.writeUInt32(24, message.getMaxLogicalLobSize(), false);
+                if(message.hasMaxRowSize())
+                    output.writeUInt32(25, message.getMaxRowSize(), false);
+                if(message.hasMaxSchemaNameLength())
+                    output.writeUInt32(26, message.getMaxSchemaNameLength(), false);
+                if(message.hasMaxStatementLength())
+                    output.writeUInt32(27, message.getMaxStatementLength(), false);
+                if(message.hasMaxStatements())
+                    output.writeUInt32(28, message.getMaxStatements(), false);
+                if(message.hasMaxTableNameLength())
+                    output.writeUInt32(29, message.getMaxTableNameLength(), false);
+                if(message.hasMaxTablesInSelect())
+                    output.writeUInt32(30, message.getMaxTablesInSelect(), false);
+                if(message.hasMaxUserNameLength())
+                    output.writeUInt32(31, message.getMaxUserNameLength(), false);
+                if(message.hasNullCollation())
+                    output.writeEnum(32, message.getNullCollation().getNumber(), false);
+                if(message.hasNullPlusNonNullEqualsNull())
+                    output.writeBool(33, message.getNullPlusNonNullEqualsNull(), false);
+                for(String numericFunctions : message.getNumericFunctionsList())
+                    output.writeString(34, numericFunctions, true);
+                for(org.apache.drill.exec.proto.UserProtos.OrderBySupport orderBySupport : message.getOrderBySupportList())
+                    output.writeEnum(35, orderBySupport.getNumber(), true);
+                for(org.apache.drill.exec.proto.UserProtos.OuterJoinSupport outerJoinSupport : message.getOuterJoinSupportList())
+                    output.writeEnum(36, outerJoinSupport.getNumber(), true);
+                if(message.hasQuotedIdentifierCasing())
+                    output.writeEnum(37, message.getQuotedIdentifierCasing().getNumber(), false);
+                if(message.hasReadOnly())
+                    output.writeBool(38, message.getReadOnly(), false);
+                if(message.hasSchemaTerm())
+                    output.writeString(39, message.getSchemaTerm(), false);
+                if(message.hasSearchEscapeString())
+                    output.writeString(40, message.getSearchEscapeString(), false);
+                if(message.hasSelectForUpdateSupported())
+                    output.writeBool(41, message.getSelectForUpdateSupported(), false);
+                if(message.hasSpecialCharacters())
+                    output.writeString(42, message.getSpecialCharacters(), false);
+                for(String sqlKeywords : message.getSqlKeywordsList())
+                    output.writeString(43, sqlKeywords, true);
+                for(String stringFunctions : message.getStringFunctionsList())
+                    output.writeString(44, stringFunctions, true);
+                for(org.apache.drill.exec.proto.UserProtos.SubQuerySupport subquerySupport : message.getSubquerySupportList())
+                    output.writeEnum(45, subquerySupport.getNumber(), true);
+                for(String systemFunctions : message.getSystemFunctionsList())
+                    output.writeString(46, systemFunctions, true);
+                if(message.hasTableTerm())
+                    output.writeString(47, message.getTableTerm(), false);
+                if(message.hasTransactionSupported())
+                    output.writeBool(48, message.getTransactionSupported(), false);
+                for(org.apache.drill.exec.proto.UserProtos.UnionSupport unionSupport : message.getUnionSupportList())
+                    output.writeEnum(49, unionSupport.getNumber(), true);
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.ServerMeta message)
+            {
+                return message.isInitialized();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.ServerMeta> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ServerMeta.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ServerMeta.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ServerMeta.class.getName();
+            }
+            //unused
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.ServerMeta message) throws java.io.IOException {}
+            public org.apache.drill.exec.proto.UserProtos.ServerMeta newMessage() { return null; }
+        }
+        public static class BuilderSchema implements com.dyuproject.protostuff.Schema<org.apache.drill.exec.proto.UserProtos.ServerMeta.Builder>
+        {
+            public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserProtos.ServerMeta.Builder builder) throws java.io.IOException
+            {
+                for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this))
+                {
+                    switch(number)
+                    {
+                        case 0:
+                            return;
+                        case 1:
+                            builder.setAllTablesSelectable(input.readBool());
+                            break;
+                        case 2:
+                            builder.setBlobIncludedInMaxRowSize(input.readBool());
+                            break;
+                        case 3:
+                            builder.setCatalogAtStart(input.readBool());
+                            break;
+                        case 4:
+                            builder.setCatalogSeparator(input.readString());
+                            break;
+                        case 5:
+                            builder.setCatalogTerm(input.readString());
+                            break;
+                        case 6:
+                            builder.addCollateSupport(org.apache.drill.exec.proto.UserProtos.CollateSupport.valueOf(input.readEnum()));
+                            break;
+                        case 7:
+                            builder.setColumnAliasingSupported(input.readBool());
+                            break;
+                        case 8:
+                            builder.addConvertSupport(input.mergeObject(org.apache.drill.exec.proto.UserProtos.ConvertSupport.newBuilder(), org.apache.drill.exec.proto.SchemaUserProtos.ConvertSupport.MERGE));
+
+                            break;
+                        case 9:
+                            builder.setCorrelationNamesSupport(org.apache.drill.exec.proto.UserProtos.CorrelationNamesSupport.valueOf(input.readEnum()));
+                            break;
+                        case 10:
+                            builder.addDateTimeFunctions(input.readString());
+                            break;
+                        case 11:
+                            builder.addDateTimeLiteralsSupport(org.apache.drill.exec.proto.UserProtos.DateTimeLiteralsSupport.valueOf(input.readEnum()));
+                            break;
+                        case 12:
+                            builder.setGroupBySupport(org.apache.drill.exec.proto.UserProtos.GroupBySupport.valueOf(input.readEnum()));
+                            break;
+                        case 13:
+                            builder.setIdentifierCasing(org.apache.drill.exec.proto.UserProtos.IdentifierCasing.valueOf(input.readEnum()));
+                            break;
+                        case 14:
+                            builder.setIdentifierQuoteString(input.readString());
+                            break;
+                        case 15:
+                            builder.setLikeEscapeClauseSupported(input.readBool());
+                            break;
+                        case 16:
+                            builder.setMaxBinaryLiteralLength(input.readUInt32());
+                            break;
+                        case 17:
+                            builder.setMaxCatalogNameLength(input.readUInt32());
+                            break;
+                        case 18:
+                            builder.setMaxCharLiteralLength(input.readUInt32());
+                            break;
+                        case 19:
+                            builder.setMaxColumnNameLength(input.readUInt32());
+                            break;
+                        case 20:
+                            builder.setMaxColumnsInGroupBy(input.readUInt32());
+                            break;
+                        case 21:
+                            builder.setMaxColumnsInOrderBy(input.readUInt32());
+                            break;
+                        case 22:
+                            builder.setMaxColumnsInSelect(input.readUInt32());
+                            break;
+                        case 23:
+                            builder.setMaxCursorNameLength(input.readUInt32());
+                            break;
+                        case 24:
+                            builder.setMaxLogicalLobSize(input.readUInt32());
+                            break;
+                        case 25:
+                            builder.setMaxRowSize(input.readUInt32());
+                            break;
+                        case 26:
+                            builder.setMaxSchemaNameLength(input.readUInt32());
+                            break;
+                        case 27:
+                            builder.setMaxStatementLength(input.readUInt32());
+                            break;
+                        case 28:
+                            builder.setMaxStatements(input.readUInt32());
+                            break;
+                        case 29:
+                            builder.setMaxTableNameLength(input.readUInt32());
+                            break;
+                        case 30:
+                            builder.setMaxTablesInSelect(input.readUInt32());
+                            break;
+                        case 31:
+                            builder.setMaxUserNameLength(input.readUInt32());
+                            break;
+                        case 32:
+                            builder.setNullCollation(org.apache.drill.exec.proto.UserProtos.NullCollation.valueOf(input.readEnum()));
+                            break;
+                        case 33:
+                            builder.setNullPlusNonNullEqualsNull(input.readBool());
+                            break;
+                        case 34:
+                            builder.addNumericFunctions(input.readString());
+                            break;
+                        case 35:
+                            builder.addOrderBySupport(org.apache.drill.exec.proto.UserProtos.OrderBySupport.valueOf(input.readEnum()));
+                            break;
+                        case 36:
+                            builder.addOuterJoinSupport(org.apache.drill.exec.proto.UserProtos.OuterJoinSupport.valueOf(input.readEnum()));
+                            break;
+                        case 37:
+                            builder.setQuotedIdentifierCasing(org.apache.drill.exec.proto.UserProtos.IdentifierCasing.valueOf(input.readEnum()));
+                            break;
+                        case 38:
+                            builder.setReadOnly(input.readBool());
+                            break;
+                        case 39:
+                            builder.setSchemaTerm(input.readString());
+                            break;
+                        case 40:
+                            builder.setSearchEscapeString(input.readString());
+                            break;
+                        case 41:
+                            builder.setSelectForUpdateSupported(input.readBool());
+                            break;
+                        case 42:
+                            builder.setSpecialCharacters(input.readString());
+                            break;
+                        case 43:
+                            builder.addSqlKeywords(input.readString());
+                            break;
+                        case 44:
+                            builder.addStringFunctions(input.readString());
+                            break;
+                        case 45:
+                            builder.addSubquerySupport(org.apache.drill.exec.proto.UserProtos.SubQuerySupport.valueOf(input.readEnum()));
+                            break;
+                        case 46:
+                            builder.addSystemFunctions(input.readString());
+                            break;
+                        case 47:
+                            builder.setTableTerm(input.readString());
+                            break;
+                        case 48:
+                            builder.setTransactionSupported(input.readBool());
+                            break;
+                        case 49:
+                            builder.addUnionSupport(org.apache.drill.exec.proto.UserProtos.UnionSupport.valueOf(input.readEnum()));
+                            break;
+                        default:
+                            input.handleUnknownField(number, this);
+                    }
+                }
+            }
+            public boolean isInitialized(org.apache.drill.exec.proto.UserProtos.ServerMeta.Builder builder)
+            {
+                return builder.isInitialized();
+            }
+            public org.apache.drill.exec.proto.UserProtos.ServerMeta.Builder newMessage()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ServerMeta.newBuilder();
+            }
+            public java.lang.String getFieldName(int number)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.getFieldName(number);
+            }
+            public int getFieldNumber(java.lang.String name)
+            {
+                return org.apache.drill.exec.proto.SchemaUserProtos.ServerMeta.getFieldNumber(name);
+            }
+            public java.lang.Class<org.apache.drill.exec.proto.UserProtos.ServerMeta.Builder> typeClass()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ServerMeta.Builder.class;
+            }
+            public java.lang.String messageName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ServerMeta.class.getSimpleName();
+            }
+            public java.lang.String messageFullName()
+            {
+                return org.apache.drill.exec.proto.UserProtos.ServerMeta.class.getName();
+            }
+            //unused
+            public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserProtos.ServerMeta.Builder builder) throws java.io.IOException {}
+        }
+        public static java.lang.String getFieldName(int number)
+        {
+            switch(number)
+            {
+                case 1: return "allTablesSelectable";
+                case 2: return "blobIncludedInMaxRowSize";
+                case 3: return "catalogAtStart";
+                case 4: return "catalogSeparator";
+                case 5: return "catalogTerm";
+                case 6: return "collateSupport";
+                case 7: return "columnAliasingSupported";
+                case 8: return "convertSupport";
+                case 9: return "correlationNamesSupport";
+                case 10: return "dateTimeFunctions";
+                case 11: return "dateTimeLiteralsSupport";
+                case 12: return "groupBySupport";
+                case 13: return "identifierCasing";
+                case 14: return "identifierQuoteString";
+                case 15: return "likeEscapeClauseSupported";
+                case 16: return "maxBinaryLiteralLength";
+                case 17: return "maxCatalogNameLength";
+                case 18: return "maxCharLiteralLength";
+                case 19: return "maxColumnNameLength";
+                case 20: return "maxColumnsInGroupBy";
+                case 21: return "maxColumnsInOrderBy";
+                case 22: return "maxColumnsInSelect";
+                case 23: return "maxCursorNameLength";
+                case 24: return "maxLogicalLobSize";
+                case 25: return "maxRowSize";
+                case 26: return "maxSchemaNameLength";
+                case 27: return "maxStatementLength";
+                case 28: return "maxStatements";
+                case 29: return "maxTableNameLength";
+                case 30: return "maxTablesInSelect";
+                case 31: return "maxUserNameLength";
+                case 32: return "nullCollation";
+                case 33: return "nullPlusNonNullEqualsNull";
+                case 34: return "numericFunctions";
+                case 35: return "orderBySupport";
+                case 36: return "outerJoinSupport";
+                case 37: return "quotedIdentifierCasing";
+                case 38: return "readOnly";
+                case 39: return "schemaTerm";
+                case 40: return "searchEscapeString";
+                case 41: return "selectForUpdateSupported";
+                case 42: return "specialCharacters";
+                case 43: return "sqlKeywords";
+                case 44: return "stringFunctions";
+                case 45: return "subquerySupport";
+                case 46: return "systemFunctions";
+                case 47: return "tableTerm";
+                case 48: return "transactionSupported";
+                case 49: return "unionSupport";
+                default: return null;
+            }
+        }
+        public static int getFieldNumber(java.lang.String name)
+        {
+            java.lang.Integer number = fieldMap.get(name);
+            return number == null ? 0 : number.intValue();
+        }
+        private static final java.util.HashMap<java.lang.String,java.lang.Integer> fieldMap = new java.util.HashMap<java.lang.String,java.lang.Integer>();
+        static
+        {
+            fieldMap.put("allTablesSelectable", 1);
+            fieldMap.put("blobIncludedInMaxRowSize", 2);
+            fieldMap.put("catalogAtStart", 3);
+            fieldMap.put("catalogSeparator", 4);
+            fieldMap.put("catalogTerm", 5);
+            fieldMap.put("collateSupport", 6);
+            fieldMap.put("columnAliasingSupported", 7);
+            fieldMap.put("convertSupport", 8);
+            fieldMap.put("correlationNamesSupport", 9);
+            fieldMap.put("dateTimeFunctions", 10);
+            fieldMap.put("dateTimeLiteralsSupport", 11);
+            fieldMap.put("groupBySupport", 12);
+            fieldMap.put("identifierCasing", 13);
+            fieldMap.put("identifierQuoteString", 14);
+            fieldMap.put("likeEscapeClauseSupported", 15);
+            fieldMap.put("maxBinaryLiteralLength", 16);
+            fieldMap.put("maxCatalogNameLength", 17);
+            fieldMap.put("maxCharLiteralLength", 18);
+            fieldMap.put("maxColumnNameLength", 19);
+            fieldMap.put("maxColumnsInGroupBy", 20);
+            fieldMap.put("maxColumnsInOrderBy", 21);
+            fieldMap.put("maxColumnsInSelect", 22);
+            fieldMap.put("maxCursorNameLength", 23);
+            fieldMap.put("maxLogicalLobSize", 24);
+            fieldMap.put("maxRowSize", 25);
+            fieldMap.put("maxSchemaNameLength", 26);
+            fieldMap.put("maxStatementLength", 27);
+            fieldMap.put("maxStatements", 28);
+            fieldMap.put("maxTableNameLength", 29);
+            fieldMap.put("maxTablesInSelect", 30);
+            fieldMap.put("maxUserNameLength", 31);
+            fieldMap.put("nullCollation", 32);
+            fieldMap.put("nullPlusNonNullEqualsNull", 33);
+            fieldMap.put("numericFunctions", 34);
+            fieldMap.put("orderBySupport", 35);
+            fieldMap.put("outerJoinSupport", 36);
+            fieldMap.put("quotedIdentifierCasing", 37);
+            fieldMap.put("readOnly", 38);
+            fieldMap.put("schemaTerm", 39);
+            fieldMap.put("searchEscapeString", 40);
+            fieldMap.put("selectForUpdateSupported", 41);
+            fieldMap.put("specialCharacters", 42);
+            fieldMap.put("sqlKeywords", 43);
+            fieldMap.put("stringFunctions", 44);
+            fieldMap.put("subquerySupport", 45);
+            fieldMap.put("systemFunctions", 46);
+            fieldMap.put("tableTerm", 47);
+            fieldMap.put("transactionSupported", 48);
+            fieldMap.put("unionSupport", 49);
+        }
+    }
+
     public static final class RunQuery
     {
         public static final org.apache.drill.exec.proto.SchemaUserProtos.RunQuery.MessageSchema WRITE =
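
A note for readers of the generated schema above: it pairs a switch statement for the number-to-name direction with a static HashMap for the name-to-number direction. The same bidirectional lookup, reduced to a minimal standalone C++ sketch for consistency with the client-side code in the commits below (FieldTable and its sample entries are illustrative stand-ins, not Drill code):

#include <iostream>
#include <string>
#include <unordered_map>

class FieldTable {
public:
    FieldTable() {
        // A few sample ServerMeta fields; the generated code covers all 49.
        add(1, "allTablesSelectable");
        add(6, "collateSupport");
        add(49, "unionSupport");
    }
    // number -> name; an empty string plays the role of the generated null.
    std::string getFieldName(int number) const {
        std::unordered_map<int, std::string>::const_iterator it = m_byNumber.find(number);
        return it == m_byNumber.end() ? std::string() : it->second;
    }
    // name -> number; 0 means "unknown", matching the generated code.
    int getFieldNumber(const std::string& name) const {
        std::unordered_map<std::string, int>::const_iterator it = m_byName.find(name);
        return it == m_byName.end() ? 0 : it->second;
    }
private:
    void add(int number, const std::string& name) {
        m_byNumber[number] = name;
        m_byName[name] = number;
    }
    std::unordered_map<int, std::string> m_byNumber;
    std::unordered_map<std::string, int> m_byName;
};

int main() {
    FieldTable t;
    std::cout << t.getFieldName(6) << "\n";                // collateSupport
    std::cout << t.getFieldNumber("unionSupport") << "\n"; // 49
    return 0;
}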


[18/27] drill git commit: DRILL-5221: Send cancel message as soon as possible in C++ connector

Posted by jn...@apache.org.
DRILL-5221: Send cancel message as soon as possible in C++ connector

In the C++ connector, send the cancellation request to the server as soon as
possible: as soon as the queryId is received, or immediately upon the user's
request if the queryId has already been received.

close #733
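
A minimal standalone sketch of the pattern this message describes, using hypothetical names (QueryHandle, onQueryId, sendCancel) rather than the connector's actual API: the user-side cancel() sends right away when the queryId is known, and otherwise records the request so the I/O side can flush it as soon as the queryId arrives.

#include <iostream>
#include <mutex>
#include <string>

class QueryHandle {
public:
    // Called by the user thread to request cancellation.
    void cancel() {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_cancelRequested = true;
        // If the server already assigned a queryId, cancel right away
        // instead of waiting for the next incoming batch.
        if (!m_queryId.empty()) {
            sendCancel(m_queryId);
        }
    }

    // Called by the I/O thread when the server returns the queryId.
    void onQueryId(const std::string& queryId) {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_queryId = queryId;
        // A cancellation may have been requested before the queryId
        // arrived; flush it now that the id is known.
        if (m_cancelRequested) {
            sendCancel(m_queryId);
        }
    }

private:
    void sendCancel(const std::string& queryId) {
        // Placeholder for the RPC that would carry the cancel message.
        std::cout << "sending cancel for query " << queryId << "\n";
    }

    std::mutex m_mutex;
    std::string m_queryId;
    bool m_cancelRequested = false;
};

int main() {
    QueryHandle handle;
    handle.cancel();            // queryId not known yet: nothing sent
    handle.onQueryId("abc123"); // queryId arrives: pending cancel is flushed
    return 0;
}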


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/20a374c5
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/20a374c5
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/20a374c5

Branch: refs/heads/master
Commit: 20a374c5bfabb49f4c8b3144a5a8529d17ee03fd
Parents: c81f588
Author: Laurent Goujon <la...@dremio.com>
Authored: Tue Jan 24 18:47:47 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:33 2017 -0800

----------------------------------------------------------------------
 .../client/src/clientlib/drillClientImpl.cpp    | 48 +++++++++++---------
 .../client/src/clientlib/drillClientImpl.hpp    |  2 +-
 2 files changed, 27 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/20a374c5/contrib/native/client/src/clientlib/drillClientImpl.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClientImpl.cpp b/contrib/native/client/src/clientlib/drillClientImpl.cpp
index 417fe80..ce3ab63 100644
--- a/contrib/native/client/src/clientlib/drillClientImpl.cpp
+++ b/contrib/native/client/src/clientlib/drillClientImpl.cpp
@@ -1012,7 +1012,7 @@ status_t DrillClientImpl::processQueryData(AllocatedBufferPtr allocatedBuffer, c
             DRILL_MT_LOG(DRILL_LOG(LOG_DEBUG) << "Processing Query cancellation " << std::endl;)
         	delete qr;
         	delete allocatedBuffer;
-        	ret =  QRY_CANCEL;
+        	ret =  QRY_CANCELED;
         } else {
         	//Validate the RPC message
         	std::string valErr;
@@ -1689,7 +1689,6 @@ status_t DrillClientImpl::handleQryCancellation(status_t status, DrillClientQuer
 	pQueryHandle->setIsQueryPending(false);
 	DRILL_MT_LOG(DRILL_LOG(LOG_DEBUG) << "Client app cancelled query." << std::endl;)
 	pQueryHandle->setQueryStatus(status);
-	removeQueryResult(pQueryHandle);
 	removeQueryHandle(pQueryHandle);
 	return status;
 }
@@ -1732,25 +1731,23 @@ status_t DrillClientImpl::handleTerminatedQryState(
 
 void DrillClientImpl::removeQueryHandle(DrillClientQueryHandle* pQueryHandle){
     boost::lock_guard<boost::mutex> lock(m_dcMutex);
-    if(!m_queryHandles.empty()){
-        for(std::map<int, DrillClientQueryHandle*>::const_iterator iter=m_queryHandles.begin(); iter!=m_queryHandles.end(); iter++) {
-            if(pQueryHandle==(DrillClientQueryHandle*)iter->second){
-                m_queryHandles.erase(iter->first);
-                break;
-            }
-        }
+    // Remove the base handle first
+    for(std::map<int, DrillClientQueryHandle*>::const_iterator iter=m_queryHandles.begin(); iter!=m_queryHandles.end(); iter++) {
+    	if(pQueryHandle==(DrillClientQueryHandle*)iter->second){
+    		m_queryHandles.erase(iter->first);
+    		break;
+    	}
     }
-}
 
-void DrillClientImpl::removeQueryResult(DrillClientQueryResult* pQueryResult){
-    boost::lock_guard<boost::mutex> lock(m_dcMutex);
-    if(!m_queryResults.empty()){
-        for(std::map<exec::shared::QueryId*, DrillClientQueryResult*, compareQueryId>::const_iterator it=m_queryResults.begin(); it!=m_queryResults.end(); it++) {
-            if(pQueryResult==(DrillClientQueryResult*)it->second){
-                m_queryResults.erase(it->first);
-                break;
-            }
-        }
+    // If the query handle is a result handle, m_queryResults also needs to be cleaned.
+    DrillClientQueryResult* pQueryResult = dynamic_cast<DrillClientQueryResult*>(pQueryHandle);
+    if (pQueryResult) {
+    	for(std::map<exec::shared::QueryId*, DrillClientQueryResult*, compareQueryId>::const_iterator it=m_queryResults.begin(); it!=m_queryResults.end(); it++) {
+    		if(pQueryResult==(DrillClientQueryResult*)it->second){
+    			m_queryResults.erase(it->first);
+    			break;
+    		}
+    	}
     }
 }
 
@@ -1949,6 +1946,16 @@ RecordBatch*  DrillClientQueryResult::peekNext(){
     return pRecordBatch;
 }
 
+void DrillClientQueryResult::cancel() {
+	// Call the parent class implementation first
+	DrillClientBaseHandle<pfnQueryResultsListener, RecordBatch*>::cancel();
+
+	// If queryId has already been received, don't wait to send the
+	// cancellation message
+	if (this->m_pQueryId) {
+		this->client().handleQryCancellation(QRY_CANCELED, this);
+	}
+}
 RecordBatch*  DrillClientQueryResult::getNext() {
     RecordBatch* pRecordBatch=NULL;
     boost::unique_lock<boost::mutex> cvLock(this->m_cvMutex);
@@ -2073,9 +2080,6 @@ void DrillClientQueryResult::clearAndDestroy(){
         DRILL_MT_LOG(DRILL_LOG(LOG_TRACE) << "Clearing state for Query Id - " << debugPrintQid(*this->m_pQueryId) << std::endl;)
     }
 
-    //Tell the parent to remove this from its lists
-    this->client().removeQueryResult(this);
-
     //clear query id map entries.
     if(this->m_pQueryId!=NULL){
         delete this->m_pQueryId; this->m_pQueryId=NULL;

http://git-wip-us.apache.org/repos/asf/drill/blob/20a374c5/contrib/native/client/src/clientlib/drillClientImpl.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClientImpl.hpp b/contrib/native/client/src/clientlib/drillClientImpl.hpp
index 5eb850d..bc6503d 100644
--- a/contrib/native/client/src/clientlib/drillClientImpl.hpp
+++ b/contrib/native/client/src/clientlib/drillClientImpl.hpp
@@ -210,6 +210,7 @@ class DrillClientQueryResult: public DrillClientBaseHandle<pfnQueryResultsListen
         m_pSchemaListener=l;
     }
 
+    void cancel();
     // Synchronous call to get data. Caller assumes ownership of the record batch
     // returned and it is assumed to have been consumed.
     RecordBatch*  getNext();
@@ -519,7 +520,6 @@ class DrillClientImpl : public DrillClientImplBase{
                 DrillClientQueryResult* pQueryResult);
         void broadcastError(DrillClientError* pErr);
         void removeQueryHandle(DrillClientQueryHandle* pQueryHandle);
-        void removeQueryResult(DrillClientQueryResult* pQueryResult);
         void sendAck(const rpc::InBoundRpcMessage& msg, bool isOk);
         void sendCancel(const exec::shared::QueryId* pQueryId);
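
The consolidated removeQueryHandle above erases map entries by scanning for a matching value, since cleanup holds only the handle pointer and not the map key. A minimal generic sketch of that erase-by-value scan, with a hypothetical eraseByValue helper that is not part of Drill:

#include <cassert>
#include <map>
#include <string>

template <typename Map, typename Value>
bool eraseByValue(Map& m, const Value& v) {
    // Linear scan: the maps are keyed by id, but cleanup only has the value.
    for (typename Map::iterator it = m.begin(); it != m.end(); ++it) {
        if (it->second == v) {
            m.erase(it); // erase invalidates 'it', so return immediately
            return true;
        }
    }
    return false;
}

int main() {
    std::map<int, std::string> handles;
    handles[1] = "query-a";
    handles[2] = "query-b";
    assert(eraseByValue(handles, std::string("query-b")));
    assert(handles.size() == 1);
    return 0;
}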
 


[06/27] drill git commit: DRILL-5301: Add C++ client support for Server metadata API

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/src/protobuf/User.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/User.pb.h b/contrib/native/client/src/protobuf/User.pb.h
index d332c36..a8f8db4 100644
--- a/contrib/native/client/src/protobuf/User.pb.h
+++ b/contrib/native/client/src/protobuf/User.pb.h
@@ -26,6 +26,7 @@
 #include <google/protobuf/generated_enum_reflection.h>
 #include <google/protobuf/unknown_field_set.h>
 #include "SchemaDef.pb.h"
+#include "Types.pb.h"
 #include "UserBitShared.pb.h"
 #include "BitData.pb.h"
 #include "BitControl.pb.h"
@@ -66,6 +67,10 @@ class ResultColumnMetadata;
 class PreparedStatementHandle;
 class PreparedStatement;
 class CreatePreparedStatementResp;
+class GetServerMetaReq;
+class ConvertSupport;
+class GetServerMetaResp;
+class ServerMeta;
 class RunQuery;
 
 enum RpcType {
@@ -82,6 +87,7 @@ enum RpcType {
   GET_TABLES = 16,
   GET_COLUMNS = 17,
   CREATE_PREPARED_STATEMENT = 22,
+  GET_SERVER_META = 8,
   QUERY_DATA = 6,
   QUERY_HANDLE = 7,
   QUERY_PLAN_FRAGMENTS = 13,
@@ -90,8 +96,7 @@ enum RpcType {
   TABLES = 20,
   COLUMNS = 21,
   PREPARED_STATEMENT = 23,
-  REQ_META_FUNCTIONS = 8,
-  RESP_FUNCTION_LIST = 9,
+  SERVER_META = 9,
   QUERY_RESULT = 10,
   SASL_MESSAGE = 24
 };
@@ -232,6 +237,232 @@ inline bool ColumnUpdatability_Parse(
   return ::google::protobuf::internal::ParseNamedEnum<ColumnUpdatability>(
     ColumnUpdatability_descriptor(), name, value);
 }
+enum CollateSupport {
+  CS_UNKNOWN = 0,
+  CS_GROUP_BY = 1
+};
+bool CollateSupport_IsValid(int value);
+const CollateSupport CollateSupport_MIN = CS_UNKNOWN;
+const CollateSupport CollateSupport_MAX = CS_GROUP_BY;
+const int CollateSupport_ARRAYSIZE = CollateSupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* CollateSupport_descriptor();
+inline const ::std::string& CollateSupport_Name(CollateSupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    CollateSupport_descriptor(), value);
+}
+inline bool CollateSupport_Parse(
+    const ::std::string& name, CollateSupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<CollateSupport>(
+    CollateSupport_descriptor(), name, value);
+}
+enum CorrelationNamesSupport {
+  CN_NONE = 1,
+  CN_DIFFERENT_NAMES = 2,
+  CN_ANY = 3
+};
+bool CorrelationNamesSupport_IsValid(int value);
+const CorrelationNamesSupport CorrelationNamesSupport_MIN = CN_NONE;
+const CorrelationNamesSupport CorrelationNamesSupport_MAX = CN_ANY;
+const int CorrelationNamesSupport_ARRAYSIZE = CorrelationNamesSupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* CorrelationNamesSupport_descriptor();
+inline const ::std::string& CorrelationNamesSupport_Name(CorrelationNamesSupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    CorrelationNamesSupport_descriptor(), value);
+}
+inline bool CorrelationNamesSupport_Parse(
+    const ::std::string& name, CorrelationNamesSupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<CorrelationNamesSupport>(
+    CorrelationNamesSupport_descriptor(), name, value);
+}
+enum DateTimeLiteralsSupport {
+  DL_UNKNOWN = 0,
+  DL_DATE = 1,
+  DL_TIME = 2,
+  DL_TIMESTAMP = 3,
+  DL_INTERVAL_YEAR = 4,
+  DL_INTERVAL_MONTH = 5,
+  DL_INTERVAL_DAY = 6,
+  DL_INTERVAL_HOUR = 7,
+  DL_INTERVAL_MINUTE = 8,
+  DL_INTERVAL_SECOND = 9,
+  DL_INTERVAL_YEAR_TO_MONTH = 10,
+  DL_INTERVAL_DAY_TO_HOUR = 11,
+  DL_INTERVAL_DAY_TO_MINUTE = 12,
+  DL_INTERVAL_DAY_TO_SECOND = 13,
+  DL_INTERVAL_HOUR_TO_MINUTE = 14,
+  DL_INTERVAL_HOUR_TO_SECOND = 15,
+  DL_INTERVAL_MINUTE_TO_SECOND = 16
+};
+bool DateTimeLiteralsSupport_IsValid(int value);
+const DateTimeLiteralsSupport DateTimeLiteralsSupport_MIN = DL_UNKNOWN;
+const DateTimeLiteralsSupport DateTimeLiteralsSupport_MAX = DL_INTERVAL_MINUTE_TO_SECOND;
+const int DateTimeLiteralsSupport_ARRAYSIZE = DateTimeLiteralsSupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* DateTimeLiteralsSupport_descriptor();
+inline const ::std::string& DateTimeLiteralsSupport_Name(DateTimeLiteralsSupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    DateTimeLiteralsSupport_descriptor(), value);
+}
+inline bool DateTimeLiteralsSupport_Parse(
+    const ::std::string& name, DateTimeLiteralsSupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<DateTimeLiteralsSupport>(
+    DateTimeLiteralsSupport_descriptor(), name, value);
+}
+enum GroupBySupport {
+  GB_NONE = 1,
+  GB_SELECT_ONLY = 2,
+  GB_BEYOND_SELECT = 3,
+  GB_UNRELATED = 4
+};
+bool GroupBySupport_IsValid(int value);
+const GroupBySupport GroupBySupport_MIN = GB_NONE;
+const GroupBySupport GroupBySupport_MAX = GB_UNRELATED;
+const int GroupBySupport_ARRAYSIZE = GroupBySupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* GroupBySupport_descriptor();
+inline const ::std::string& GroupBySupport_Name(GroupBySupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    GroupBySupport_descriptor(), value);
+}
+inline bool GroupBySupport_Parse(
+    const ::std::string& name, GroupBySupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<GroupBySupport>(
+    GroupBySupport_descriptor(), name, value);
+}
+enum IdentifierCasing {
+  IC_UNKNOWN = 0,
+  IC_STORES_LOWER = 1,
+  IC_STORES_MIXED = 2,
+  IC_STORES_UPPER = 3,
+  IC_SUPPORTS_MIXED = 4
+};
+bool IdentifierCasing_IsValid(int value);
+const IdentifierCasing IdentifierCasing_MIN = IC_UNKNOWN;
+const IdentifierCasing IdentifierCasing_MAX = IC_SUPPORTS_MIXED;
+const int IdentifierCasing_ARRAYSIZE = IdentifierCasing_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* IdentifierCasing_descriptor();
+inline const ::std::string& IdentifierCasing_Name(IdentifierCasing value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    IdentifierCasing_descriptor(), value);
+}
+inline bool IdentifierCasing_Parse(
+    const ::std::string& name, IdentifierCasing* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<IdentifierCasing>(
+    IdentifierCasing_descriptor(), name, value);
+}
+enum NullCollation {
+  NC_UNKNOWN = 0,
+  NC_AT_START = 1,
+  NC_AT_END = 2,
+  NC_HIGH = 3,
+  NC_LOW = 4
+};
+bool NullCollation_IsValid(int value);
+const NullCollation NullCollation_MIN = NC_UNKNOWN;
+const NullCollation NullCollation_MAX = NC_LOW;
+const int NullCollation_ARRAYSIZE = NullCollation_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* NullCollation_descriptor();
+inline const ::std::string& NullCollation_Name(NullCollation value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    NullCollation_descriptor(), value);
+}
+inline bool NullCollation_Parse(
+    const ::std::string& name, NullCollation* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<NullCollation>(
+    NullCollation_descriptor(), name, value);
+}
+enum OrderBySupport {
+  OB_UNKNOWN = 0,
+  OB_UNRELATED = 1,
+  OB_EXPRESSION = 2
+};
+bool OrderBySupport_IsValid(int value);
+const OrderBySupport OrderBySupport_MIN = OB_UNKNOWN;
+const OrderBySupport OrderBySupport_MAX = OB_EXPRESSION;
+const int OrderBySupport_ARRAYSIZE = OrderBySupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* OrderBySupport_descriptor();
+inline const ::std::string& OrderBySupport_Name(OrderBySupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    OrderBySupport_descriptor(), value);
+}
+inline bool OrderBySupport_Parse(
+    const ::std::string& name, OrderBySupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<OrderBySupport>(
+    OrderBySupport_descriptor(), name, value);
+}
+enum OuterJoinSupport {
+  OJ_UNKNOWN = 0,
+  OJ_LEFT = 1,
+  OJ_RIGHT = 2,
+  OJ_FULL = 3,
+  OJ_NESTED = 4,
+  OJ_NOT_ORDERED = 5,
+  OJ_INNER = 6,
+  OJ_ALL_COMPARISON_OPS = 7
+};
+bool OuterJoinSupport_IsValid(int value);
+const OuterJoinSupport OuterJoinSupport_MIN = OJ_UNKNOWN;
+const OuterJoinSupport OuterJoinSupport_MAX = OJ_ALL_COMPARISON_OPS;
+const int OuterJoinSupport_ARRAYSIZE = OuterJoinSupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* OuterJoinSupport_descriptor();
+inline const ::std::string& OuterJoinSupport_Name(OuterJoinSupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    OuterJoinSupport_descriptor(), value);
+}
+inline bool OuterJoinSupport_Parse(
+    const ::std::string& name, OuterJoinSupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<OuterJoinSupport>(
+    OuterJoinSupport_descriptor(), name, value);
+}
+enum SubQuerySupport {
+  SQ_UNKNOWN = 0,
+  SQ_CORRELATED = 1,
+  SQ_IN_COMPARISON = 2,
+  SQ_IN_EXISTS = 3,
+  SQ_IN_INSERT = 4,
+  SQ_IN_QUANTIFIED = 5
+};
+bool SubQuerySupport_IsValid(int value);
+const SubQuerySupport SubQuerySupport_MIN = SQ_UNKNOWN;
+const SubQuerySupport SubQuerySupport_MAX = SQ_IN_QUANTIFIED;
+const int SubQuerySupport_ARRAYSIZE = SubQuerySupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* SubQuerySupport_descriptor();
+inline const ::std::string& SubQuerySupport_Name(SubQuerySupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    SubQuerySupport_descriptor(), value);
+}
+inline bool SubQuerySupport_Parse(
+    const ::std::string& name, SubQuerySupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<SubQuerySupport>(
+    SubQuerySupport_descriptor(), name, value);
+}
+enum UnionSupport {
+  U_UNKNOWN = 0,
+  U_UNION = 1,
+  U_UNION_ALL = 2
+};
+bool UnionSupport_IsValid(int value);
+const UnionSupport UnionSupport_MIN = U_UNKNOWN;
+const UnionSupport UnionSupport_MAX = U_UNION_ALL;
+const int UnionSupport_ARRAYSIZE = UnionSupport_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* UnionSupport_descriptor();
+inline const ::std::string& UnionSupport_Name(UnionSupport value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    UnionSupport_descriptor(), value);
+}
+inline bool UnionSupport_Parse(
+    const ::std::string& name, UnionSupport* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<UnionSupport>(
+    UnionSupport_descriptor(), name, value);
+}
 // ===================================================================
 
 class Property : public ::google::protobuf::Message {
@@ -3545,14 +3776,14 @@ class CreatePreparedStatementResp : public ::google::protobuf::Message {
 };
 // -------------------------------------------------------------------
 
-class RunQuery : public ::google::protobuf::Message {
+class GetServerMetaReq : public ::google::protobuf::Message {
  public:
-  RunQuery();
-  virtual ~RunQuery();
+  GetServerMetaReq();
+  virtual ~GetServerMetaReq();
 
-  RunQuery(const RunQuery& from);
+  GetServerMetaReq(const GetServerMetaReq& from);
 
-  inline RunQuery& operator=(const RunQuery& from) {
+  inline GetServerMetaReq& operator=(const GetServerMetaReq& from) {
     CopyFrom(from);
     return *this;
   }
@@ -3566,17 +3797,17 @@ class RunQuery : public ::google::protobuf::Message {
   }
 
   static const ::google::protobuf::Descriptor* descriptor();
-  static const RunQuery& default_instance();
+  static const GetServerMetaReq& default_instance();
 
-  void Swap(RunQuery* other);
+  void Swap(GetServerMetaReq* other);
 
   // implements Message ----------------------------------------------
 
-  RunQuery* New() const;
+  GetServerMetaReq* New() const;
   void CopyFrom(const ::google::protobuf::Message& from);
   void MergeFrom(const ::google::protobuf::Message& from);
-  void CopyFrom(const RunQuery& from);
-  void MergeFrom(const RunQuery& from);
+  void CopyFrom(const GetServerMetaReq& from);
+  void MergeFrom(const GetServerMetaReq& from);
   void Clear();
   bool IsInitialized() const;
 
@@ -3599,1668 +3830,3985 @@ class RunQuery : public ::google::protobuf::Message {
 
   // accessors -------------------------------------------------------
 
-  // optional .exec.user.QueryResultsMode results_mode = 1;
-  inline bool has_results_mode() const;
-  inline void clear_results_mode();
-  static const int kResultsModeFieldNumber = 1;
-  inline ::exec::user::QueryResultsMode results_mode() const;
-  inline void set_results_mode(::exec::user::QueryResultsMode value);
+  // @@protoc_insertion_point(class_scope:exec.user.GetServerMetaReq)
+ private:
 
-  // optional .exec.shared.QueryType type = 2;
-  inline bool has_type() const;
-  inline void clear_type();
-  static const int kTypeFieldNumber = 2;
-  inline ::exec::shared::QueryType type() const;
-  inline void set_type(::exec::shared::QueryType value);
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
-  // optional string plan = 3;
-  inline bool has_plan() const;
-  inline void clear_plan();
-  static const int kPlanFieldNumber = 3;
-  inline const ::std::string& plan() const;
-  inline void set_plan(const ::std::string& value);
-  inline void set_plan(const char* value);
-  inline void set_plan(const char* value, size_t size);
-  inline ::std::string* mutable_plan();
-  inline ::std::string* release_plan();
-  inline void set_allocated_plan(::std::string* plan);
 
-  // repeated .exec.bit.control.PlanFragment fragments = 4;
-  inline int fragments_size() const;
-  inline void clear_fragments();
-  static const int kFragmentsFieldNumber = 4;
-  inline const ::exec::bit::control::PlanFragment& fragments(int index) const;
-  inline ::exec::bit::control::PlanFragment* mutable_fragments(int index);
-  inline ::exec::bit::control::PlanFragment* add_fragments();
-  inline const ::google::protobuf::RepeatedPtrField< ::exec::bit::control::PlanFragment >&
-      fragments() const;
-  inline ::google::protobuf::RepeatedPtrField< ::exec::bit::control::PlanFragment >*
-      mutable_fragments();
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[1];
 
-  // optional .exec.user.PreparedStatementHandle prepared_statement_handle = 5;
-  inline bool has_prepared_statement_handle() const;
-  inline void clear_prepared_statement_handle();
-  static const int kPreparedStatementHandleFieldNumber = 5;
-  inline const ::exec::user::PreparedStatementHandle& prepared_statement_handle() const;
-  inline ::exec::user::PreparedStatementHandle* mutable_prepared_statement_handle();
-  inline ::exec::user::PreparedStatementHandle* release_prepared_statement_handle();
-  inline void set_allocated_prepared_statement_handle(::exec::user::PreparedStatementHandle* prepared_statement_handle);
+  friend void  protobuf_AddDesc_User_2eproto();
+  friend void protobuf_AssignDesc_User_2eproto();
+  friend void protobuf_ShutdownFile_User_2eproto();
 
-  // @@protoc_insertion_point(class_scope:exec.user.RunQuery)
+  void InitAsDefaultInstance();
+  static GetServerMetaReq* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class ConvertSupport : public ::google::protobuf::Message {
+ public:
+  ConvertSupport();
+  virtual ~ConvertSupport();
+
+  ConvertSupport(const ConvertSupport& from);
+
+  inline ConvertSupport& operator=(const ConvertSupport& from) {
+    CopyFrom(from);
+    return *this;
+  }
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
+  }
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
+  }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const ConvertSupport& default_instance();
+
+  void Swap(ConvertSupport* other);
+
+  // implements Message ----------------------------------------------
+
+  ConvertSupport* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const ConvertSupport& from);
+  void MergeFrom(const ConvertSupport& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // required .common.MinorType from = 1;
+  inline bool has_from() const;
+  inline void clear_from();
+  static const int kFromFieldNumber = 1;
+  inline ::common::MinorType from() const;
+  inline void set_from(::common::MinorType value);
+
+  // required .common.MinorType to = 2;
+  inline bool has_to() const;
+  inline void clear_to();
+  static const int kToFieldNumber = 2;
+  inline ::common::MinorType to() const;
+  inline void set_to(::common::MinorType value);
+
+  // @@protoc_insertion_point(class_scope:exec.user.ConvertSupport)
  private:
-  inline void set_has_results_mode();
-  inline void clear_has_results_mode();
-  inline void set_has_type();
-  inline void clear_has_type();
-  inline void set_has_plan();
-  inline void clear_has_plan();
-  inline void set_has_prepared_statement_handle();
-  inline void clear_has_prepared_statement_handle();
+  inline void set_has_from();
+  inline void clear_has_from();
+  inline void set_has_to();
+  inline void clear_has_to();
 
   ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
-  int results_mode_;
-  int type_;
-  ::std::string* plan_;
-  ::google::protobuf::RepeatedPtrField< ::exec::bit::control::PlanFragment > fragments_;
-  ::exec::user::PreparedStatementHandle* prepared_statement_handle_;
+  int from_;
+  int to_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(5 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(2 + 31) / 32];
 
   friend void  protobuf_AddDesc_User_2eproto();
   friend void protobuf_AssignDesc_User_2eproto();
   friend void protobuf_ShutdownFile_User_2eproto();
 
   void InitAsDefaultInstance();
-  static RunQuery* default_instance_;
+  static ConvertSupport* default_instance_;
 };
-// ===================================================================
-
+// -------------------------------------------------------------------
 
-// ===================================================================
+class GetServerMetaResp : public ::google::protobuf::Message {
+ public:
+  GetServerMetaResp();
+  virtual ~GetServerMetaResp();
 
-// Property
+  GetServerMetaResp(const GetServerMetaResp& from);
 
-// required string key = 1;
-inline bool Property::has_key() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void Property::set_has_key() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void Property::clear_has_key() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void Property::clear_key() {
-  if (key_ != &::google::protobuf::internal::kEmptyString) {
-    key_->clear();
-  }
-  clear_has_key();
-}
-inline const ::std::string& Property::key() const {
-  return *key_;
-}
-inline void Property::set_key(const ::std::string& value) {
-  set_has_key();
-  if (key_ == &::google::protobuf::internal::kEmptyString) {
-    key_ = new ::std::string;
-  }
-  key_->assign(value);
-}
-inline void Property::set_key(const char* value) {
-  set_has_key();
-  if (key_ == &::google::protobuf::internal::kEmptyString) {
-    key_ = new ::std::string;
-  }
-  key_->assign(value);
-}
-inline void Property::set_key(const char* value, size_t size) {
-  set_has_key();
-  if (key_ == &::google::protobuf::internal::kEmptyString) {
-    key_ = new ::std::string;
-  }
-  key_->assign(reinterpret_cast<const char*>(value), size);
-}
-inline ::std::string* Property::mutable_key() {
-  set_has_key();
-  if (key_ == &::google::protobuf::internal::kEmptyString) {
-    key_ = new ::std::string;
-  }
-  return key_;
-}
-inline ::std::string* Property::release_key() {
-  clear_has_key();
-  if (key_ == &::google::protobuf::internal::kEmptyString) {
-    return NULL;
-  } else {
-    ::std::string* temp = key_;
-    key_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
-    return temp;
-  }
-}
-inline void Property::set_allocated_key(::std::string* key) {
-  if (key_ != &::google::protobuf::internal::kEmptyString) {
-    delete key_;
+  inline GetServerMetaResp& operator=(const GetServerMetaResp& from) {
+    CopyFrom(from);
+    return *this;
   }
-  if (key) {
-    set_has_key();
-    key_ = key;
-  } else {
-    clear_has_key();
-    key_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
   }
-}
 
-// required string value = 2;
-inline bool Property::has_value() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
-}
-inline void Property::set_has_value() {
-  _has_bits_[0] |= 0x00000002u;
-}
-inline void Property::clear_has_value() {
-  _has_bits_[0] &= ~0x00000002u;
-}
-inline void Property::clear_value() {
-  if (value_ != &::google::protobuf::internal::kEmptyString) {
-    value_->clear();
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
   }
-  clear_has_value();
-}
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const GetServerMetaResp& default_instance();
+
+  void Swap(GetServerMetaResp* other);
+
+  // implements Message ----------------------------------------------
+
+  GetServerMetaResp* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const GetServerMetaResp& from);
+  void MergeFrom(const GetServerMetaResp& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional .exec.user.RequestStatus status = 1;
+  inline bool has_status() const;
+  inline void clear_status();
+  static const int kStatusFieldNumber = 1;
+  inline ::exec::user::RequestStatus status() const;
+  inline void set_status(::exec::user::RequestStatus value);
+
+  // optional .exec.user.ServerMeta server_meta = 2;
+  inline bool has_server_meta() const;
+  inline void clear_server_meta();
+  static const int kServerMetaFieldNumber = 2;
+  inline const ::exec::user::ServerMeta& server_meta() const;
+  inline ::exec::user::ServerMeta* mutable_server_meta();
+  inline ::exec::user::ServerMeta* release_server_meta();
+  inline void set_allocated_server_meta(::exec::user::ServerMeta* server_meta);
+
+  // optional .exec.shared.DrillPBError error = 3;
+  inline bool has_error() const;
+  inline void clear_error();
+  static const int kErrorFieldNumber = 3;
+  inline const ::exec::shared::DrillPBError& error() const;
+  inline ::exec::shared::DrillPBError* mutable_error();
+  inline ::exec::shared::DrillPBError* release_error();
+  inline void set_allocated_error(::exec::shared::DrillPBError* error);
+
+  // @@protoc_insertion_point(class_scope:exec.user.GetServerMetaResp)
+ private:
+  inline void set_has_status();
+  inline void clear_has_status();
+  inline void set_has_server_meta();
+  inline void clear_has_server_meta();
+  inline void set_has_error();
+  inline void clear_has_error();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  ::exec::user::ServerMeta* server_meta_;
+  ::exec::shared::DrillPBError* error_;
+  int status_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(3 + 31) / 32];
+
+  friend void  protobuf_AddDesc_User_2eproto();
+  friend void protobuf_AssignDesc_User_2eproto();
+  friend void protobuf_ShutdownFile_User_2eproto();
+
+  void InitAsDefaultInstance();
+  static GetServerMetaResp* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class ServerMeta : public ::google::protobuf::Message {
+ public:
+  ServerMeta();
+  virtual ~ServerMeta();
+
+  ServerMeta(const ServerMeta& from);
+
+  inline ServerMeta& operator=(const ServerMeta& from) {
+    CopyFrom(from);
+    return *this;
+  }
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
+  }
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
+  }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const ServerMeta& default_instance();
+
+  void Swap(ServerMeta* other);
+
+  // implements Message ----------------------------------------------
+
+  ServerMeta* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const ServerMeta& from);
+  void MergeFrom(const ServerMeta& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional bool all_tables_selectable = 1;
+  inline bool has_all_tables_selectable() const;
+  inline void clear_all_tables_selectable();
+  static const int kAllTablesSelectableFieldNumber = 1;
+  inline bool all_tables_selectable() const;
+  inline void set_all_tables_selectable(bool value);
+
+  // optional bool blob_included_in_max_row_size = 2;
+  inline bool has_blob_included_in_max_row_size() const;
+  inline void clear_blob_included_in_max_row_size();
+  static const int kBlobIncludedInMaxRowSizeFieldNumber = 2;
+  inline bool blob_included_in_max_row_size() const;
+  inline void set_blob_included_in_max_row_size(bool value);
+
+  // optional bool catalog_at_start = 3;
+  inline bool has_catalog_at_start() const;
+  inline void clear_catalog_at_start();
+  static const int kCatalogAtStartFieldNumber = 3;
+  inline bool catalog_at_start() const;
+  inline void set_catalog_at_start(bool value);
+
+  // optional string catalog_separator = 4;
+  inline bool has_catalog_separator() const;
+  inline void clear_catalog_separator();
+  static const int kCatalogSeparatorFieldNumber = 4;
+  inline const ::std::string& catalog_separator() const;
+  inline void set_catalog_separator(const ::std::string& value);
+  inline void set_catalog_separator(const char* value);
+  inline void set_catalog_separator(const char* value, size_t size);
+  inline ::std::string* mutable_catalog_separator();
+  inline ::std::string* release_catalog_separator();
+  inline void set_allocated_catalog_separator(::std::string* catalog_separator);
+
+  // optional string catalog_term = 5;
+  inline bool has_catalog_term() const;
+  inline void clear_catalog_term();
+  static const int kCatalogTermFieldNumber = 5;
+  inline const ::std::string& catalog_term() const;
+  inline void set_catalog_term(const ::std::string& value);
+  inline void set_catalog_term(const char* value);
+  inline void set_catalog_term(const char* value, size_t size);
+  inline ::std::string* mutable_catalog_term();
+  inline ::std::string* release_catalog_term();
+  inline void set_allocated_catalog_term(::std::string* catalog_term);
+
+  // repeated .exec.user.CollateSupport collate_support = 6;
+  inline int collate_support_size() const;
+  inline void clear_collate_support();
+  static const int kCollateSupportFieldNumber = 6;
+  inline ::exec::user::CollateSupport collate_support(int index) const;
+  inline void set_collate_support(int index, ::exec::user::CollateSupport value);
+  inline void add_collate_support(::exec::user::CollateSupport value);
+  inline const ::google::protobuf::RepeatedField<int>& collate_support() const;
+  inline ::google::protobuf::RepeatedField<int>* mutable_collate_support();
+
+  // optional bool column_aliasing_supported = 7;
+  inline bool has_column_aliasing_supported() const;
+  inline void clear_column_aliasing_supported();
+  static const int kColumnAliasingSupportedFieldNumber = 7;
+  inline bool column_aliasing_supported() const;
+  inline void set_column_aliasing_supported(bool value);
+
+  // repeated .exec.user.ConvertSupport convert_support = 8;
+  inline int convert_support_size() const;
+  inline void clear_convert_support();
+  static const int kConvertSupportFieldNumber = 8;
+  inline const ::exec::user::ConvertSupport& convert_support(int index) const;
+  inline ::exec::user::ConvertSupport* mutable_convert_support(int index);
+  inline ::exec::user::ConvertSupport* add_convert_support();
+  inline const ::google::protobuf::RepeatedPtrField< ::exec::user::ConvertSupport >&
+      convert_support() const;
+  inline ::google::protobuf::RepeatedPtrField< ::exec::user::ConvertSupport >*
+      mutable_convert_support();
+
+  // optional .exec.user.CorrelationNamesSupport correlation_names_support = 9;
+  inline bool has_correlation_names_support() const;
+  inline void clear_correlation_names_support();
+  static const int kCorrelationNamesSupportFieldNumber = 9;
+  inline ::exec::user::CorrelationNamesSupport correlation_names_support() const;
+  inline void set_correlation_names_support(::exec::user::CorrelationNamesSupport value);
+
+  // repeated string date_time_functions = 10;
+  inline int date_time_functions_size() const;
+  inline void clear_date_time_functions();
+  static const int kDateTimeFunctionsFieldNumber = 10;
+  inline const ::std::string& date_time_functions(int index) const;
+  inline ::std::string* mutable_date_time_functions(int index);
+  inline void set_date_time_functions(int index, const ::std::string& value);
+  inline void set_date_time_functions(int index, const char* value);
+  inline void set_date_time_functions(int index, const char* value, size_t size);
+  inline ::std::string* add_date_time_functions();
+  inline void add_date_time_functions(const ::std::string& value);
+  inline void add_date_time_functions(const char* value);
+  inline void add_date_time_functions(const char* value, size_t size);
+  inline const ::google::protobuf::RepeatedPtrField< ::std::string>& date_time_functions() const;
+  inline ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_date_time_functions();
+
+  // repeated .exec.user.DateTimeLiteralsSupport date_time_literals_support = 11;
+  inline int date_time_literals_support_size() const;
+  inline void clear_date_time_literals_support();
+  static const int kDateTimeLiteralsSupportFieldNumber = 11;
+  inline ::exec::user::DateTimeLiteralsSupport date_time_literals_support(int index) const;
+  inline void set_date_time_literals_support(int index, ::exec::user::DateTimeLiteralsSupport value);
+  inline void add_date_time_literals_support(::exec::user::DateTimeLiteralsSupport value);
+  inline const ::google::protobuf::RepeatedField<int>& date_time_literals_support() const;
+  inline ::google::protobuf::RepeatedField<int>* mutable_date_time_literals_support();
+
+  // optional .exec.user.GroupBySupport group_by_support = 12;
+  inline bool has_group_by_support() const;
+  inline void clear_group_by_support();
+  static const int kGroupBySupportFieldNumber = 12;
+  inline ::exec::user::GroupBySupport group_by_support() const;
+  inline void set_group_by_support(::exec::user::GroupBySupport value);
+
+  // optional .exec.user.IdentifierCasing identifier_casing = 13;
+  inline bool has_identifier_casing() const;
+  inline void clear_identifier_casing();
+  static const int kIdentifierCasingFieldNumber = 13;
+  inline ::exec::user::IdentifierCasing identifier_casing() const;
+  inline void set_identifier_casing(::exec::user::IdentifierCasing value);
+
+  // optional string identifier_quote_string = 14;
+  inline bool has_identifier_quote_string() const;
+  inline void clear_identifier_quote_string();
+  static const int kIdentifierQuoteStringFieldNumber = 14;
+  inline const ::std::string& identifier_quote_string() const;
+  inline void set_identifier_quote_string(const ::std::string& value);
+  inline void set_identifier_quote_string(const char* value);
+  inline void set_identifier_quote_string(const char* value, size_t size);
+  inline ::std::string* mutable_identifier_quote_string();
+  inline ::std::string* release_identifier_quote_string();
+  inline void set_allocated_identifier_quote_string(::std::string* identifier_quote_string);
+
+  // optional bool like_escape_clause_supported = 15;
+  inline bool has_like_escape_clause_supported() const;
+  inline void clear_like_escape_clause_supported();
+  static const int kLikeEscapeClauseSupportedFieldNumber = 15;
+  inline bool like_escape_clause_supported() const;
+  inline void set_like_escape_clause_supported(bool value);
+
+  // optional uint32 max_binary_literal_length = 16;
+  inline bool has_max_binary_literal_length() const;
+  inline void clear_max_binary_literal_length();
+  static const int kMaxBinaryLiteralLengthFieldNumber = 16;
+  inline ::google::protobuf::uint32 max_binary_literal_length() const;
+  inline void set_max_binary_literal_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_catalog_name_length = 17;
+  inline bool has_max_catalog_name_length() const;
+  inline void clear_max_catalog_name_length();
+  static const int kMaxCatalogNameLengthFieldNumber = 17;
+  inline ::google::protobuf::uint32 max_catalog_name_length() const;
+  inline void set_max_catalog_name_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_char_literal_length = 18;
+  inline bool has_max_char_literal_length() const;
+  inline void clear_max_char_literal_length();
+  static const int kMaxCharLiteralLengthFieldNumber = 18;
+  inline ::google::protobuf::uint32 max_char_literal_length() const;
+  inline void set_max_char_literal_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_column_name_length = 19;
+  inline bool has_max_column_name_length() const;
+  inline void clear_max_column_name_length();
+  static const int kMaxColumnNameLengthFieldNumber = 19;
+  inline ::google::protobuf::uint32 max_column_name_length() const;
+  inline void set_max_column_name_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_columns_in_group_by = 20;
+  inline bool has_max_columns_in_group_by() const;
+  inline void clear_max_columns_in_group_by();
+  static const int kMaxColumnsInGroupByFieldNumber = 20;
+  inline ::google::protobuf::uint32 max_columns_in_group_by() const;
+  inline void set_max_columns_in_group_by(::google::protobuf::uint32 value);
+
+  // optional uint32 max_columns_in_order_by = 21;
+  inline bool has_max_columns_in_order_by() const;
+  inline void clear_max_columns_in_order_by();
+  static const int kMaxColumnsInOrderByFieldNumber = 21;
+  inline ::google::protobuf::uint32 max_columns_in_order_by() const;
+  inline void set_max_columns_in_order_by(::google::protobuf::uint32 value);
+
+  // optional uint32 max_columns_in_select = 22;
+  inline bool has_max_columns_in_select() const;
+  inline void clear_max_columns_in_select();
+  static const int kMaxColumnsInSelectFieldNumber = 22;
+  inline ::google::protobuf::uint32 max_columns_in_select() const;
+  inline void set_max_columns_in_select(::google::protobuf::uint32 value);
+
+  // optional uint32 max_cursor_name_length = 23;
+  inline bool has_max_cursor_name_length() const;
+  inline void clear_max_cursor_name_length();
+  static const int kMaxCursorNameLengthFieldNumber = 23;
+  inline ::google::protobuf::uint32 max_cursor_name_length() const;
+  inline void set_max_cursor_name_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_logical_lob_size = 24;
+  inline bool has_max_logical_lob_size() const;
+  inline void clear_max_logical_lob_size();
+  static const int kMaxLogicalLobSizeFieldNumber = 24;
+  inline ::google::protobuf::uint32 max_logical_lob_size() const;
+  inline void set_max_logical_lob_size(::google::protobuf::uint32 value);
+
+  // optional uint32 max_row_size = 25;
+  inline bool has_max_row_size() const;
+  inline void clear_max_row_size();
+  static const int kMaxRowSizeFieldNumber = 25;
+  inline ::google::protobuf::uint32 max_row_size() const;
+  inline void set_max_row_size(::google::protobuf::uint32 value);
+
+  // optional uint32 max_schema_name_length = 26;
+  inline bool has_max_schema_name_length() const;
+  inline void clear_max_schema_name_length();
+  static const int kMaxSchemaNameLengthFieldNumber = 26;
+  inline ::google::protobuf::uint32 max_schema_name_length() const;
+  inline void set_max_schema_name_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_statement_length = 27;
+  inline bool has_max_statement_length() const;
+  inline void clear_max_statement_length();
+  static const int kMaxStatementLengthFieldNumber = 27;
+  inline ::google::protobuf::uint32 max_statement_length() const;
+  inline void set_max_statement_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_statements = 28;
+  inline bool has_max_statements() const;
+  inline void clear_max_statements();
+  static const int kMaxStatementsFieldNumber = 28;
+  inline ::google::protobuf::uint32 max_statements() const;
+  inline void set_max_statements(::google::protobuf::uint32 value);
+
+  // optional uint32 max_table_name_length = 29;
+  inline bool has_max_table_name_length() const;
+  inline void clear_max_table_name_length();
+  static const int kMaxTableNameLengthFieldNumber = 29;
+  inline ::google::protobuf::uint32 max_table_name_length() const;
+  inline void set_max_table_name_length(::google::protobuf::uint32 value);
+
+  // optional uint32 max_tables_in_select = 30;
+  inline bool has_max_tables_in_select() const;
+  inline void clear_max_tables_in_select();
+  static const int kMaxTablesInSelectFieldNumber = 30;
+  inline ::google::protobuf::uint32 max_tables_in_select() const;
+  inline void set_max_tables_in_select(::google::protobuf::uint32 value);
+
+  // optional uint32 max_user_name_length = 31;
+  inline bool has_max_user_name_length() const;
+  inline void clear_max_user_name_length();
+  static const int kMaxUserNameLengthFieldNumber = 31;
+  inline ::google::protobuf::uint32 max_user_name_length() const;
+  inline void set_max_user_name_length(::google::protobuf::uint32 value);
+
+  // optional .exec.user.NullCollation null_collation = 32;
+  inline bool has_null_collation() const;
+  inline void clear_null_collation();
+  static const int kNullCollationFieldNumber = 32;
+  inline ::exec::user::NullCollation null_collation() const;
+  inline void set_null_collation(::exec::user::NullCollation value);
+
+  // optional bool null_plus_non_null_equals_null = 33;
+  inline bool has_null_plus_non_null_equals_null() const;
+  inline void clear_null_plus_non_null_equals_null();
+  static const int kNullPlusNonNullEqualsNullFieldNumber = 33;
+  inline bool null_plus_non_null_equals_null() const;
+  inline void set_null_plus_non_null_equals_null(bool value);
+
+  // repeated string numeric_functions = 34;
+  inline int numeric_functions_size() const;
+  inline void clear_numeric_functions();
+  static const int kNumericFunctionsFieldNumber = 34;
+  inline const ::std::string& numeric_functions(int index) const;
+  inline ::std::string* mutable_numeric_functions(int index);
+  inline void set_numeric_functions(int index, const ::std::string& value);
+  inline void set_numeric_functions(int index, const char* value);
+  inline void set_numeric_functions(int index, const char* value, size_t size);
+  inline ::std::string* add_numeric_functions();
+  inline void add_numeric_functions(const ::std::string& value);
+  inline void add_numeric_functions(const char* value);
+  inline void add_numeric_functions(const char* value, size_t size);
+  inline const ::google::protobuf::RepeatedPtrField< ::std::string>& numeric_functions() const;
+  inline ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_numeric_functions();
+
+  // repeated .exec.user.OrderBySupport order_by_support = 35;
+  inline int order_by_support_size() const;
+  inline void clear_order_by_support();
+  static const int kOrderBySupportFieldNumber = 35;
+  inline ::exec::user::OrderBySupport order_by_support(int index) const;
+  inline void set_order_by_support(int index, ::exec::user::OrderBySupport value);
+  inline void add_order_by_support(::exec::user::OrderBySupport value);
+  inline const ::google::protobuf::RepeatedField<int>& order_by_support() const;
+  inline ::google::protobuf::RepeatedField<int>* mutable_order_by_support();
+
+  // repeated .exec.user.OuterJoinSupport outer_join_support = 36;
+  inline int outer_join_support_size() const;
+  inline void clear_outer_join_support();
+  static const int kOuterJoinSupportFieldNumber = 36;
+  inline ::exec::user::OuterJoinSupport outer_join_support(int index) const;
+  inline void set_outer_join_support(int index, ::exec::user::OuterJoinSupport value);
+  inline void add_outer_join_support(::exec::user::OuterJoinSupport value);
+  inline const ::google::protobuf::RepeatedField<int>& outer_join_support() const;
+  inline ::google::protobuf::RepeatedField<int>* mutable_outer_join_support();
+
+  // optional .exec.user.IdentifierCasing quoted_identifier_casing = 37;
+  inline bool has_quoted_identifier_casing() const;
+  inline void clear_quoted_identifier_casing();
+  static const int kQuotedIdentifierCasingFieldNumber = 37;
+  inline ::exec::user::IdentifierCasing quoted_identifier_casing() const;
+  inline void set_quoted_identifier_casing(::exec::user::IdentifierCasing value);
+
+  // optional bool read_only = 38;
+  inline bool has_read_only() const;
+  inline void clear_read_only();
+  static const int kReadOnlyFieldNumber = 38;
+  inline bool read_only() const;
+  inline void set_read_only(bool value);
+
+  // optional string schema_term = 39;
+  inline bool has_schema_term() const;
+  inline void clear_schema_term();
+  static const int kSchemaTermFieldNumber = 39;
+  inline const ::std::string& schema_term() const;
+  inline void set_schema_term(const ::std::string& value);
+  inline void set_schema_term(const char* value);
+  inline void set_schema_term(const char* value, size_t size);
+  inline ::std::string* mutable_schema_term();
+  inline ::std::string* release_schema_term();
+  inline void set_allocated_schema_term(::std::string* schema_term);
+
+  // optional string search_escape_string = 40;
+  inline bool has_search_escape_string() const;
+  inline void clear_search_escape_string();
+  static const int kSearchEscapeStringFieldNumber = 40;
+  inline const ::std::string& search_escape_string() const;
+  inline void set_search_escape_string(const ::std::string& value);
+  inline void set_search_escape_string(const char* value);
+  inline void set_search_escape_string(const char* value, size_t size);
+  inline ::std::string* mutable_search_escape_string();
+  inline ::std::string* release_search_escape_string();
+  inline void set_allocated_search_escape_string(::std::string* search_escape_string);
+
+  // optional bool select_for_update_supported = 41;
+  inline bool has_select_for_update_supported() const;
+  inline void clear_select_for_update_supported();
+  static const int kSelectForUpdateSupportedFieldNumber = 41;
+  inline bool select_for_update_supported() const;
+  inline void set_select_for_update_supported(bool value);
+
+  // optional string special_characters = 42;
+  inline bool has_special_characters() const;
+  inline void clear_special_characters();
+  static const int kSpecialCharactersFieldNumber = 42;
+  inline const ::std::string& special_characters() const;
+  inline void set_special_characters(const ::std::string& value);
+  inline void set_special_characters(const char* value);
+  inline void set_special_characters(const char* value, size_t size);
+  inline ::std::string* mutable_special_characters();
+  inline ::std::string* release_special_characters();
+  inline void set_allocated_special_characters(::std::string* special_characters);
+
+  // repeated string sql_keywords = 43;
+  inline int sql_keywords_size() const;
+  inline void clear_sql_keywords();
+  static const int kSqlKeywordsFieldNumber = 43;
+  inline const ::std::string& sql_keywords(int index) const;
+  inline ::std::string* mutable_sql_keywords(int index);
+  inline void set_sql_keywords(int index, const ::std::string& value);
+  inline void set_sql_keywords(int index, const char* value);
+  inline void set_sql_keywords(int index, const char* value, size_t size);
+  inline ::std::string* add_sql_keywords();
+  inline void add_sql_keywords(const ::std::string& value);
+  inline void add_sql_keywords(const char* value);
+  inline void add_sql_keywords(const char* value, size_t size);
+  inline const ::google::protobuf::RepeatedPtrField< ::std::string>& sql_keywords() const;
+  inline ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_sql_keywords();
+
+  // repeated string string_functions = 44;
+  inline int string_functions_size() const;
+  inline void clear_string_functions();
+  static const int kStringFunctionsFieldNumber = 44;
+  inline const ::std::string& string_functions(int index) const;
+  inline ::std::string* mutable_string_functions(int index);
+  inline void set_string_functions(int index, const ::std::string& value);
+  inline void set_string_functions(int index, const char* value);
+  inline void set_string_functions(int index, const char* value, size_t size);
+  inline ::std::string* add_string_functions();
+  inline void add_string_functions(const ::std::string& value);
+  inline void add_string_functions(const char* value);
+  inline void add_string_functions(const char* value, size_t size);
+  inline const ::google::protobuf::RepeatedPtrField< ::std::string>& string_functions() const;
+  inline ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_string_functions();
+
+  // repeated .exec.user.SubQuerySupport subquery_support = 45;
+  inline int subquery_support_size() const;
+  inline void clear_subquery_support();
+  static const int kSubquerySupportFieldNumber = 45;
+  inline ::exec::user::SubQuerySupport subquery_support(int index) const;
+  inline void set_subquery_support(int index, ::exec::user::SubQuerySupport value);
+  inline void add_subquery_support(::exec::user::SubQuerySupport value);
+  inline const ::google::protobuf::RepeatedField<int>& subquery_support() const;
+  inline ::google::protobuf::RepeatedField<int>* mutable_subquery_support();
+
+  // repeated string system_functions = 46;
+  inline int system_functions_size() const;
+  inline void clear_system_functions();
+  static const int kSystemFunctionsFieldNumber = 46;
+  inline const ::std::string& system_functions(int index) const;
+  inline ::std::string* mutable_system_functions(int index);
+  inline void set_system_functions(int index, const ::std::string& value);
+  inline void set_system_functions(int index, const char* value);
+  inline void set_system_functions(int index, const char* value, size_t size);
+  inline ::std::string* add_system_functions();
+  inline void add_system_functions(const ::std::string& value);
+  inline void add_system_functions(const char* value);
+  inline void add_system_functions(const char* value, size_t size);
+  inline const ::google::protobuf::RepeatedPtrField< ::std::string>& system_functions() const;
+  inline ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_system_functions();
+
+  // optional string table_term = 47;
+  inline bool has_table_term() const;
+  inline void clear_table_term();
+  static const int kTableTermFieldNumber = 47;
+  inline const ::std::string& table_term() const;
+  inline void set_table_term(const ::std::string& value);
+  inline void set_table_term(const char* value);
+  inline void set_table_term(const char* value, size_t size);
+  inline ::std::string* mutable_table_term();
+  inline ::std::string* release_table_term();
+  inline void set_allocated_table_term(::std::string* table_term);
+
+  // optional bool transaction_supported = 48;
+  inline bool has_transaction_supported() const;
+  inline void clear_transaction_supported();
+  static const int kTransactionSupportedFieldNumber = 48;
+  inline bool transaction_supported() const;
+  inline void set_transaction_supported(bool value);
+
+  // repeated .exec.user.UnionSupport union_support = 49;
+  inline int union_support_size() const;
+  inline void clear_union_support();
+  static const int kUnionSupportFieldNumber = 49;
+  inline ::exec::user::UnionSupport union_support(int index) const;
+  inline void set_union_support(int index, ::exec::user::UnionSupport value);
+  inline void add_union_support(::exec::user::UnionSupport value);
+  inline const ::google::protobuf::RepeatedField<int>& union_support() const;
+  inline ::google::protobuf::RepeatedField<int>* mutable_union_support();
+
+  // @@protoc_insertion_point(class_scope:exec.user.ServerMeta)
+ private:
+  inline void set_has_all_tables_selectable();
+  inline void clear_has_all_tables_selectable();
+  inline void set_has_blob_included_in_max_row_size();
+  inline void clear_has_blob_included_in_max_row_size();
+  inline void set_has_catalog_at_start();
+  inline void clear_has_catalog_at_start();
+  inline void set_has_catalog_separator();
+  inline void clear_has_catalog_separator();
+  inline void set_has_catalog_term();
+  inline void clear_has_catalog_term();
+  inline void set_has_column_aliasing_supported();
+  inline void clear_has_column_aliasing_supported();
+  inline void set_has_correlation_names_support();
+  inline void clear_has_correlation_names_support();
+  inline void set_has_group_by_support();
+  inline void clear_has_group_by_support();
+  inline void set_has_identifier_casing();
+  inline void clear_has_identifier_casing();
+  inline void set_has_identifier_quote_string();
+  inline void clear_has_identifier_quote_string();
+  inline void set_has_like_escape_clause_supported();
+  inline void clear_has_like_escape_clause_supported();
+  inline void set_has_max_binary_literal_length();
+  inline void clear_has_max_binary_literal_length();
+  inline void set_has_max_catalog_name_length();
+  inline void clear_has_max_catalog_name_length();
+  inline void set_has_max_char_literal_length();
+  inline void clear_has_max_char_literal_length();
+  inline void set_has_max_column_name_length();
+  inline void clear_has_max_column_name_length();
+  inline void set_has_max_columns_in_group_by();
+  inline void clear_has_max_columns_in_group_by();
+  inline void set_has_max_columns_in_order_by();
+  inline void clear_has_max_columns_in_order_by();
+  inline void set_has_max_columns_in_select();
+  inline void clear_has_max_columns_in_select();
+  inline void set_has_max_cursor_name_length();
+  inline void clear_has_max_cursor_name_length();
+  inline void set_has_max_logical_lob_size();
+  inline void clear_has_max_logical_lob_size();
+  inline void set_has_max_row_size();
+  inline void clear_has_max_row_size();
+  inline void set_has_max_schema_name_length();
+  inline void clear_has_max_schema_name_length();
+  inline void set_has_max_statement_length();
+  inline void clear_has_max_statement_length();
+  inline void set_has_max_statements();
+  inline void clear_has_max_statements();
+  inline void set_has_max_table_name_length();
+  inline void clear_has_max_table_name_length();
+  inline void set_has_max_tables_in_select();
+  inline void clear_has_max_tables_in_select();
+  inline void set_has_max_user_name_length();
+  inline void clear_has_max_user_name_length();
+  inline void set_has_null_collation();
+  inline void clear_has_null_collation();
+  inline void set_has_null_plus_non_null_equals_null();
+  inline void clear_has_null_plus_non_null_equals_null();
+  inline void set_has_quoted_identifier_casing();
+  inline void clear_has_quoted_identifier_casing();
+  inline void set_has_read_only();
+  inline void clear_has_read_only();
+  inline void set_has_schema_term();
+  inline void clear_has_schema_term();
+  inline void set_has_search_escape_string();
+  inline void clear_has_search_escape_string();
+  inline void set_has_select_for_update_supported();
+  inline void clear_has_select_for_update_supported();
+  inline void set_has_special_characters();
+  inline void clear_has_special_characters();
+  inline void set_has_table_term();
+  inline void clear_has_table_term();
+  inline void set_has_transaction_supported();
+  inline void clear_has_transaction_supported();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  ::std::string* catalog_separator_;
+  bool all_tables_selectable_;
+  bool blob_included_in_max_row_size_;
+  bool catalog_at_start_;
+  bool column_aliasing_supported_;
+  int correlation_names_support_;
+  ::std::string* catalog_term_;
+  ::google::protobuf::RepeatedField<int> collate_support_;
+  ::google::protobuf::RepeatedPtrField< ::exec::user::ConvertSupport > convert_support_;
+  ::google::protobuf::RepeatedPtrField< ::std::string> date_time_functions_;
+  ::google::protobuf::RepeatedField<int> date_time_literals_support_;
+  int group_by_support_;
+  int identifier_casing_;
+  ::std::string* identifier_quote_string_;
+  ::google::protobuf::uint32 max_binary_literal_length_;
+  ::google::protobuf::uint32 max_catalog_name_length_;
+  ::google::protobuf::uint32 max_char_literal_length_;
+  ::google::protobuf::uint32 max_column_name_length_;
+  ::google::protobuf::uint32 max_columns_in_group_by_;
+  ::google::protobuf::uint32 max_columns_in_order_by_;
+  ::google::protobuf::uint32 max_columns_in_select_;
+  ::google::protobuf::uint32 max_cursor_name_length_;
+  ::google::protobuf::uint32 max_logical_lob_size_;
+  ::google::protobuf::uint32 max_row_size_;
+  ::google::protobuf::uint32 max_schema_name_length_;
+  ::google::protobuf::uint32 max_statement_length_;
+  ::google::protobuf::uint32 max_statements_;
+  ::google::protobuf::uint32 max_table_name_length_;
+  ::google::protobuf::uint32 max_tables_in_select_;
+  ::google::protobuf::uint32 max_user_name_length_;
+  bool like_escape_clause_supported_;
+  bool null_plus_non_null_equals_null_;
+  bool read_only_;
+  bool select_for_update_supported_;
+  int null_collation_;
+  ::google::protobuf::RepeatedPtrField< ::std::string> numeric_functions_;
+  ::google::protobuf::RepeatedField<int> order_by_support_;
+  ::google::protobuf::RepeatedField<int> outer_join_support_;
+  ::std::string* schema_term_;
+  ::std::string* search_escape_string_;
+  ::std::string* special_characters_;
+  int quoted_identifier_casing_;
+  bool transaction_supported_;
+  ::google::protobuf::RepeatedPtrField< ::std::string> sql_keywords_;
+  ::google::protobuf::RepeatedPtrField< ::std::string> string_functions_;
+  ::google::protobuf::RepeatedField<int> subquery_support_;
+  ::google::protobuf::RepeatedPtrField< ::std::string> system_functions_;
+  ::std::string* table_term_;
+  ::google::protobuf::RepeatedField<int> union_support_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(49 + 31) / 32];
+
+  friend void  protobuf_AddDesc_User_2eproto();
+  friend void protobuf_AssignDesc_User_2eproto();
+  friend void protobuf_ShutdownFile_User_2eproto();
+
+  void InitAsDefaultInstance();
+  static ServerMeta* default_instance_;
+};
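
The accessors above follow the standard proto2 generated pattern: optional scalars get has_*/set_*/clear_*, string fields additionally get mutable_*/release_*/set_allocated_*, and repeated fields get *_size() plus indexed getters. A minimal sketch of reading a ServerMeta on the client side, assuming the generated header is included as "User.pb.h" (only names declared above are used):

    #include "User.pb.h"
    #include <string>

    void inspect_server_meta(const exec::user::ServerMeta& meta) {
      // Optional string field: check presence before reading.
      if (meta.has_identifier_quote_string()) {
        const std::string& quote = meta.identifier_quote_string();
        (void) quote;
      }
      // Repeated string field: iterate via the _size() accessor.
      for (int i = 0; i < meta.sql_keywords_size(); ++i) {
        const std::string& keyword = meta.sql_keywords(i);
        (void) keyword;
      }
      // Repeated enum field: stored as int, cast back on access.
      for (int i = 0; i < meta.union_support_size(); ++i) {
        exec::user::UnionSupport support = meta.union_support(i);
        (void) support;
      }
    }
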
+// -------------------------------------------------------------------
+
+class RunQuery : public ::google::protobuf::Message {
+ public:
+  RunQuery();
+  virtual ~RunQuery();
+
+  RunQuery(const RunQuery& from);
+
+  inline RunQuery& operator=(const RunQuery& from) {
+    CopyFrom(from);
+    return *this;
+  }
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
+  }
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
+  }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const RunQuery& default_instance();
+
+  void Swap(RunQuery* other);
+
+  // implements Message ----------------------------------------------
+
+  RunQuery* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const RunQuery& from);
+  void MergeFrom(const RunQuery& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional .exec.user.QueryResultsMode results_mode = 1;
+  inline bool has_results_mode() const;
+  inline void clear_results_mode();
+  static const int kResultsModeFieldNumber = 1;
+  inline ::exec::user::QueryResultsMode results_mode() const;
+  inline void set_results_mode(::exec::user::QueryResultsMode value);
+
+  // optional .exec.shared.QueryType type = 2;
+  inline bool has_type() const;
+  inline void clear_type();
+  static const int kTypeFieldNumber = 2;
+  inline ::exec::shared::QueryType type() const;
+  inline void set_type(::exec::shared::QueryType value);
+
+  // optional string plan = 3;
+  inline bool has_plan() const;
+  inline void clear_plan();
+  static const int kPlanFieldNumber = 3;
+  inline const ::std::string& plan() const;
+  inline void set_plan(const ::std::string& value);
+  inline void set_plan(const char* value);
+  inline void set_plan(const char* value, size_t size);
+  inline ::std::string* mutable_plan();
+  inline ::std::string* release_plan();
+  inline void set_allocated_plan(::std::string* plan);
+
+  // repeated .exec.bit.control.PlanFragment fragments = 4;
+  inline int fragments_size() const;
+  inline void clear_fragments();
+  static const int kFragmentsFieldNumber = 4;
+  inline const ::exec::bit::control::PlanFragment& fragments(int index) const;
+  inline ::exec::bit::control::PlanFragment* mutable_fragments(int index);
+  inline ::exec::bit::control::PlanFragment* add_fragments();
+  inline const ::google::protobuf::RepeatedPtrField< ::exec::bit::control::PlanFragment >&
+      fragments() const;
+  inline ::google::protobuf::RepeatedPtrField< ::exec::bit::control::PlanFragment >*
+      mutable_fragments();
+
+  // optional .exec.user.PreparedStatementHandle prepared_statement_handle = 5;
+  inline bool has_prepared_statement_handle() const;
+  inline void clear_prepared_statement_handle();
+  static const int kPreparedStatementHandleFieldNumber = 5;
+  inline const ::exec::user::PreparedStatementHandle& prepared_statement_handle() const;
+  inline ::exec::user::PreparedStatementHandle* mutable_prepared_statement_handle();
+  inline ::exec::user::PreparedStatementHandle* release_prepared_statement_handle();
+  inline void set_allocated_prepared_statement_handle(::exec::user::PreparedStatementHandle* prepared_statement_handle);
+
+  // @@protoc_insertion_point(class_scope:exec.user.RunQuery)
+ private:
+  inline void set_has_results_mode();
+  inline void clear_has_results_mode();
+  inline void set_has_type();
+  inline void clear_has_type();
+  inline void set_has_plan();
+  inline void clear_has_plan();
+  inline void set_has_prepared_statement_handle();
+  inline void clear_has_prepared_statement_handle();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  int results_mode_;
+  int type_;
+  ::std::string* plan_;
+  ::google::protobuf::RepeatedPtrField< ::exec::bit::control::PlanFragment > fragments_;
+  ::exec::user::PreparedStatementHandle* prepared_statement_handle_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(5 + 31) / 32];
+
+  friend void  protobuf_AddDesc_User_2eproto();
+  friend void protobuf_AssignDesc_User_2eproto();
+  friend void protobuf_ShutdownFile_User_2eproto();
+
+  void InitAsDefaultInstance();
+  static RunQuery* default_instance_;
+};
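
RunQuery mixes enum, string, and message fields but follows the same conventions. A hedged sketch of building one (STREAM_FULL and SQL are enum constants assumed from elsewhere in the generated headers; the query text is illustrative):

    #include "User.pb.h"

    exec::user::RunQuery make_query() {
      exec::user::RunQuery query;
      query.set_results_mode(exec::user::STREAM_FULL);  // assumed QueryResultsMode constant
      query.set_type(exec::shared::SQL);                // assumed QueryType constant
      query.set_plan("SELECT 1");                       // illustrative query text
      return query;
    }
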
+// ===================================================================
+
+
+// ===================================================================
+
+// Property
+
+// required string key = 1;
+inline bool Property::has_key() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Property::set_has_key() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void Property::clear_has_key() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void Property::clear_key() {
+  if (key_ != &::google::protobuf::internal::kEmptyString) {
+    key_->clear();
+  }
+  clear_has_key();
+}
+inline const ::std::string& Property::key() const {
+  return *key_;
+}
+inline void Property::set_key(const ::std::string& value) {
+  set_has_key();
+  if (key_ == &::google::protobuf::internal::kEmptyString) {
+    key_ = new ::std::string;
+  }
+  key_->assign(value);
+}
+inline void Property::set_key(const char* value) {
+  set_has_key();
+  if (key_ == &::google::protobuf::internal::kEmptyString) {
+    key_ = new ::std::string;
+  }
+  key_->assign(value);
+}
+inline void Property::set_key(const char* value, size_t size) {
+  set_has_key();
+  if (key_ == &::google::protobuf::internal::kEmptyString) {
+    key_ = new ::std::string;
+  }
+  key_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* Property::mutable_key() {
+  set_has_key();
+  if (key_ == &::google::protobuf::internal::kEmptyString) {
+    key_ = new ::std::string;
+  }
+  return key_;
+}
+inline ::std::string* Property::release_key() {
+  clear_has_key();
+  if (key_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = key_;
+    key_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void Property::set_allocated_key(::std::string* key) {
+  if (key_ != &::google::protobuf::internal::kEmptyString) {
+    delete key_;
+  }
+  if (key) {
+    set_has_key();
+    key_ = key;
+  } else {
+    clear_has_key();
+    key_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// required string value = 2;
+inline bool Property::has_value() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Property::set_has_value() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void Property::clear_has_value() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void Property::clear_value() {
+  if (value_ != &::google::protobuf::internal::kEmptyString) {
+    value_->clear();
+  }
+  clear_has_value();
+}
 inline const ::std::string& Property::value() const {
   return *value_;
 }
-inline void Property::set_value(const ::std::string& value) {
-  set_has_value();
-  if (value_ == &::google::protobuf::internal::kEmptyString) {
-    value_ = new ::std::string;
+inline void Property::set_value(const ::std::string& value) {
+  set_has_value();
+  if (value_ == &::google::protobuf::internal::kEmptyString) {
+    value_ = new ::std::string;
+  }
+  value_->assign(value);
+}
+inline void Property::set_value(const char* value) {
+  set_has_value();
+  if (value_ == &::google::protobuf::internal::kEmptyString) {
+    value_ = new ::std::string;
+  }
+  value_->assign(value);
+}
+inline void Property::set_value(const char* value, size_t size) {
+  set_has_value();
+  if (value_ == &::google::protobuf::internal::kEmptyString) {
+    value_ = new ::std::string;
+  }
+  value_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* Property::mutable_value() {
+  set_has_value();
+  if (value_ == &::google::protobuf::internal::kEmptyString) {
+    value_ = new ::std::string;
+  }
+  return value_;
+}
+inline ::std::string* Property::release_value() {
+  clear_has_value();
+  if (value_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = value_;
+    value_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void Property::set_allocated_value(::std::string* value) {
+  if (value_ != &::google::protobuf::internal::kEmptyString) {
+    delete value_;
+  }
+  if (value) {
+    set_has_value();
+    value_ = value;
+  } else {
+    clear_has_value();
+    value_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// -------------------------------------------------------------------
+
+// UserProperties
+
+// repeated .exec.user.Property properties = 1;
+inline int UserProperties::properties_size() const {
+  return properties_.size();
+}
+inline void UserProperties::clear_properties() {
+  properties_.Clear();
+}
+inline const ::exec::user::Property& UserProperties::properties(int index) const {
+  return properties_.Get(index);
+}
+inline ::exec::user::Property* UserProperties::mutable_properties(int index) {
+  return properties_.Mutable(index);
+}
+inline ::exec::user::Property* UserProperties::add_properties() {
+  return properties_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::exec::user::Property >&
+UserProperties::properties() const {
+  return properties_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::exec::user::Property >*
+UserProperties::mutable_properties() {
+  return &properties_;
+}
+
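
Property and UserProperties together show the repeated-message idiom: add_properties() allocates a new entry owned by the parent message, so no manual memory management is needed. A minimal sketch (the key/value pair is illustrative):

    #include "User.pb.h"

    exec::user::UserProperties make_props() {
      exec::user::UserProperties props;
      exec::user::Property* p = props.add_properties();  // owned by props
      p->set_key("schema");     // illustrative property name
      p->set_value("dfs.tmp");  // illustrative property value
      return props;
    }
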
+// -------------------------------------------------------------------
+
+// RpcEndpointInfos
+
+// optional string name = 1;
+inline bool RpcEndpointInfos::has_name() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void RpcEndpointInfos::set_has_name() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void RpcEndpointInfos::clear_has_name() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void RpcEndpointInfos::clear_name() {
+  if (name_ != &::google::protobuf::internal::kEmptyString) {
+    name_->clear();
+  }
+  clear_has_name();
+}
+inline const ::std::string& RpcEndpointInfos::name() const {
+  return *name_;
+}
+inline void RpcEndpointInfos::set_name(const ::std::string& value) {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  name_->assign(value);
+}
+inline void RpcEndpointInfos::set_name(const char* value) {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  name_->assign(value);
+}
+inline void RpcEndpointInfos::set_name(const char* value, size_t size) {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  name_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* RpcEndpointInfos::mutable_name() {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  return name_;
+}
+inline ::std::string* RpcEndpointInfos::release_name() {
+  clear_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = name_;
+    name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void RpcEndpointInfos::set_allocated_name(::std::string* name) {
+  if (name_ != &::google::protobuf::internal::kEmptyString) {
+    delete name_;
+  }
+  if (name) {
+    set_has_name();
+    name_ = name;
+  } else {
+    clear_has_name();
+    name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// optional string version = 2;
+inline bool RpcEndpointInfos::has_version() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void RpcEndpointInfos::set_has_version() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void RpcEndpointInfos::clear_has_version() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void RpcEndpointInfos::clear_version() {
+  if (version_ != &::google::protobuf::internal::kEmptyString) {
+    version_->clear();
+  }
+  clear_has_version();
+}
+inline const ::std::string& RpcEndpointInfos::version() const {
+  return *version_;
+}
+inline void RpcEndpointInfos::set_version(const ::std::string& value) {
+  set_has_version();
+  if (version_ == &::google::protobuf::internal::kEmptyString) {
+    version_ = new ::std::string;
+  }
+  version_->assign(value);
+}
+inline void RpcEndpointInfos::set_version(const char* value) {
+  set_has_version();
+  if (version_ == &::google::protobuf::internal::kEmptyString) {
+    version_ = new ::std::string;
+  }
+  version_->assign(value);
+}
+inline void RpcEndpointInfos::set_version(const char* value, size_t size) {
+  set_has_version();
+  if (version_ == &::google::protobuf::internal::kEmptyString) {
+    version_ = new ::std::string;
+  }
+  version_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* RpcEndpointInfos::mutable_version() {
+  set_has_version();
+  if (version_ == &::google::protobuf::internal::kEmptyString) {
+    version_ = new ::std::string;
+  }
+  return version_;
+}
+inline ::std::string* RpcEndpointInfos::release_version() {
+  clear_has_version();
+  if (version_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = version_;
+    version_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void RpcEndpointInfos::set_allocated_version(::std::string* version) {
+  if (version_ != &::google::protobuf::internal::kEmptyString) {
+    delete version_;
+  }
+  if (version) {
+    set_has_version();
+    version_ = version;
+  } else {
+    clear_has_version();
+    version_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// optional uint32 majorVersion = 3;
+inline bool RpcEndpointInfos::has_majorversion() const {
+  return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void RpcEndpointInfos::set_has_majorversion() {
+  _has_bits_[0] |= 0x00000004u;
+}
+inline void RpcEndpointInfos::clear_has_majorversion() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+inline void RpcEndpointInfos::clear_majorversion() {
+  majorversion_ = 0u;
+  clear_has_majorversion();
+}
+inline ::google::protobuf::uint32 RpcEndpointInfos::majorversion() const {
+  return majorversion_;
+}
+inline void RpcEndpointInfos::set_majorversion(::google::protobuf::uint32 value) {
+  set_has_majorversion();
+  majorversion_ = value;
+}
+
+// optional uint32 minorVersion = 4;
+inline bool RpcEndpointInfos::has_minorversion() const {
+  return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void RpcEndpointInfos::set_has_minorversion() {
+  _has_bits_[0] |= 0x00000008u;
+}
+inline void RpcEndpointInfos::clear_has_minorversion() {
+  _has_bits_[0] &= ~0x00000008u;
+}
+inline void RpcEndpointInfos::clear_minorversion() {
+  minorversion_ = 0u;
+  clear_has_minorversion();
+}
+inline ::google::protobuf::uint32 RpcEndpointInfos::minorversion() const {
+  return minorversion_;
+}
+inline void RpcEndpointInfos::set_minorversion(::google::protobuf::uint32 value) {
+  set_has_minorversion();
+  minorversion_ = value;
+}
+
+// optional uint32 patchVersion = 5;
+inline bool RpcEndpointInfos::has_patchversion() const {
+  return (_has_bits_[0] & 0x00000010u) != 0;
+}
+inline void RpcEndpointInfos::set_has_patchversion() {
+  _has_bits_[0] |= 0x00000010u;
+}
+inline void RpcEndpointInfos::clear_has_patchversion() {
+  _has_bits_[0] &= ~0x00000010u;
+}
+inline void RpcEndpointInfos::clear_patchversion() {
+  patchversion_ = 0u;
+  clear_has_patchversion();
+}
+inline ::google::protobuf::uint32 RpcEndpointInfos::patchversion() const {
+  return patchversion_;
+}
+inline void RpcEndpointInfos::set_patchversion(::google::protobuf::uint32 value) {
+  set_has_patchversion();
+  patchversion_ = value;
+}
+
+// optional string application = 6;
+inline bool RpcEndpointInfos::has_application() const {
+  return (_has_bits_[0] & 0x00000020u) != 0;
+}
+inline void RpcEndpointInfos::set_has_application() {
+  _has_bits_[0] |= 0x00000020u;
+}
+inline void RpcEndpointInfos::clear_has_application() {
+  _has_bits_[0] &= ~0x00000020u;
+}
+inline void RpcEndpointInfos::clear_application() {
+  if (application_ != &::google::protobuf::internal::kEmptyString) {
+    application_->clear();
+  }
+  clear_has_application();
+}
+inline const ::std::string& RpcEndpointInfos::application() const {
+  return *application_;
+}
+inline void RpcEndpointInfos::set_application(const ::std::string& value) {
+  set_has_application();
+  if (application_ == &::google::protobuf::internal::kEmptyString) {
+    application_ = new ::std::string;
+  }
+  application_->assign(value);
+}
+inline void RpcEndpointInfos::set_application(const char* value) {
+  set_has_application();
+  if (application_ == &::google::protobuf::internal::kEmptyString) {
+    application_ = new ::std::string;
+  }
+  application_->assign(value);
+}
+inline void RpcEndpointInfos::set_application(const char* value, size_t size) {
+  set_has_application();
+  if (application_ == &::google::protobuf::internal::kEmptyString) {
+    application_ = new ::std::string;
+  }
+  application_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* RpcEndpointInfos::mutable_application() {
+  set_has_application();
+  if (application_ == &::google::protobuf::internal::kEmptyString) {
+    application_ = new ::std::string;
+  }
+  return application_;
+}
+inline ::std::string* RpcEndpointInfos::release_application() {
+  clear_has_application();
+  if (application_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = application_;
+    application_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void RpcEndpointInfos::set_allocated_application(::std::string* application) {
+  if (application_ != &::google::protobuf::internal::kEmptyString) {
+    delete application_;
+  }
+  if (application) {
+    set_has_application();
+    application_ = application;
+  } else {
+    clear_has_application();
+    application_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// optional uint32 buildNumber = 7;
+inline bool RpcEndpointInfos::has_buildnumber() const {
+  return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void RpcEndpointInfos::set_has_buildnumber() {
+  _has_bits_[0] |= 0x00000040u;
+}
+inline void RpcEndpointInfos::clear_has_buildnumber() {
+  _has_bits_[0] &= ~0x00000040u;
+}
+inline void RpcEndpointInfos::clear_buildnumber() {
+  buildnumber_ = 0u;
+  clear_has_buildnumber();
+}
+inline ::google::protobuf::uint32 RpcEndpointInfos::buildnumber() const {
+  return buildnumber_;
+}
+inline void RpcEndpointInfos::set_buildnumber(::google::protobuf::uint32 value) {
+  set_has_buildnumber();
+  buildnumber_ = value;
+}
+
+// optional string versionQualifier = 8;
+inline bool RpcEndpointInfos::has_versionqualifier() const {
+  return (_has_bits_[0] & 0x00000080u) != 0;
+}
+inline void RpcEndpointInfos::set_has_versionqualifier() {
+  _has_bits_[0] |= 0x00000080u;
+}
+inline void RpcEndpointInfos::clear_has_versionqualifier() {
+  _has_bits_[0] &= ~0x00000080u;
+}
+inline void RpcEndpointInfos::clear_versionqualifier() {
+  if (versionqualifier_ != &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_->clear();
+  }
+  clear_has_versionqualifier();
+}
+inline const ::std::string& RpcEndpointInfos::versionqualifier() const {
+  return *versionqualifier_;
+}
+inline void RpcEndpointInfos::set_versionqualifier(const ::std::string& value) {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  versionqualifier_->assign(value);
+}
+inline void RpcEndpointInfos::set_versionqualifier(const char* value) {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  versionqualifier_->assign(value);
+}
+inline void RpcEndpointInfos::set_versionqualifier(const char* value, size_t size) {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  versionqualifier_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* RpcEndpointInfos::mutable_versionqualifier() {
+  set_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    versionqualifier_ = new ::std::string;
+  }
+  return versionqualifier_;
+}
+inline ::std::string* RpcEndpointInfos::release_versionqualifier() {
+  clear_has_versionqualifier();
+  if (versionqualifier_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = versionqualifier_;
+    versionqualifier_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void RpcEndpointInfos::set_allocated_versionqualifier(::std::string* versionqualifier) {
+  if (versionqualifier_ != &::google::protobuf::internal::kEmptyString) {
+    delete versionqualifier_;
+  }
+  if (versionqualifier) {
+    set_has_versionqualifier();
+    versionqualifier_ = versionqualifier;
+  } else {
+    clear_has_versionqualifier();
+    versionqualifier_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// -------------------------------------------------------------------
+
+// UserToBitHandshake
+
+// optional .exec.shared.RpcChannel channel = 1 [default = USER];
+inline bool UserToBitHandshake::has_channel() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void UserToBitHandshake::set_has_channel() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void UserToBitHandshake::clear_has_channel() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void UserToBitHandshake::clear_channel() {
+  channel_ = 2;
+  clear_has_channel();
+}
+inline ::exec::shared::RpcChannel UserToBitHandshake::channel() const {
+  return static_cast< ::exec::shared::RpcChannel >(channel_);
+}
+inline void UserToBitHandshake::set_channel(::exec::shared::RpcChannel value) {
+  assert(::exec::shared::RpcChannel_IsValid(value));
+  set_has_channel();
+  channel_ = value;
+}
+
+// optional bool support_listening = 2;
+inline bool UserToBitHandshake::has_support_listening() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void UserToBitHandshake::set_has_support_listening() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void UserToBitHandshake::clear_has_support_listening() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void UserToBitHandshake::clear_support_listening() {
+  support_listening_ = false;
+  clear_has_support_listening();
+}
+inline bool UserToBitHandshake::support_listening() const {
+  return support_listening_;
+}
+inline void UserToBitHandshake::set_support_listening(bool value) {
+  set_has_support_listening();
+  support_listening_ = value;
+}
+
+// optional int32 rpc_version = 3;
+inline bool UserToBitHandshake::has_rpc_version() const {
+  return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void UserToBitHandshake::set_has_rpc_version() {
+  _has_bits_[0] |= 0x00000004u;
+}
+inline void UserToBitHandshake::clear_has_rpc_version() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+inline void UserToBitHandshake::clear_rpc_version() {
+  rpc_version_ = 0;
+  clear_has_rpc_version();
+}
+inline ::google::protobuf::int32 UserToBitHandshake::rpc_version() const {
+  return rpc_version_;
+}
+inline void UserToBitHandshake::set_rpc_version(::google::protobuf::int32 value) {
+  set_has_rpc_version();
+  rpc_version_ = value;
+}
+
+// optional .exec.shared.UserCredentials credentials = 4;
+inline bool UserToBitHandshake::has_credentials() const {
+  return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void UserToBitHandshake::set_has_credentials() {
+  _has_bits_[0] |= 0x00000008u;
+}
+inline void UserToBitHandshake::clear_has_credentials() {
+  _has_bits_[0] &= ~0x00000008u;
+}
+inline void UserToBitHandshake::clear_credentials() {
+  if (credentials_ != NULL) credentials_->::exec::shared::UserCredentials::Clear();
+  clear_has_credentials();
+}
+inline const ::exec::shared::UserCredentials& UserToBitHandshake::credentials() const {
+  return credentials_ != NULL ? *credentials_ : *default_instance_->credentials_;
+}
+inline ::exec::shared::UserCredentials* UserToBitHandshake::mutable_credentials() {
+  set_has_credentials();
+  if (credentials_ == NULL) credentials_ = new ::exec::shared::UserCredentials;
+  return credentials_;
+}
+inline ::exec::shared::UserCredentials* UserToBitHandshake::release_credentials() {
+  clear_has_credentials();
+  ::exec::shared::UserCredentials* temp = credentials_;
+  credentials_ = NULL;
+  return temp;
+}
+inline void UserToBitHandshake::set_allocated_credentials(::exec::shared::UserCredentials* credentials) {
+  delete credentials_;
+  credentials_ = credentials;
+  if (credentials) {
+    set_has_credentials();
+  } else {
+    clear_has_credentials();
+  }
+}
+
+// optional .exec.user.UserProperties properties = 5;
+inline bool UserToBitHandshake::has_properties() const {
+  return (_has_bits_[0] & 0x00000010u) != 0;
+}
+inline void UserToBitHandshake::set_has_properties() {
+  _has_bits_[0] |= 0x00000010u;
+}
+inline void UserToBitHandshake::clear_has_properties() {
+  _has_bits_[0] &= ~0x00000010u;
+}
+inline void UserToBitHandshake::clear_properties() {
+  if (properties_ != NULL) properties_->::exec::user::UserProperties::Clear();
+  clear_has_properties();
+}
+inline const ::exec::user::UserProperties& UserToBitHandshake::properties() const {
+  return properties_ != NULL ? *properties_ : *default_instance_->properties_;
+}
+inline ::exec::user::UserProperties* UserToBitHandshake::mutable_properties() {
+  set_has_properties();
+  if (properties_ == NULL) properties_ = new ::exec::user::UserProperties;
+  return properties_;
+}
+inline ::exec::user::UserProperties* UserToBitHandshake::release_properties() {
+  clear_has_properties();
+  ::exec::user::UserProperties* temp = properties_;
+  properties_ = NULL;
+  return temp;
+}
+inline void UserToBitHandshake::set_allocated_properties(::exec::user::UserProperties* properties) {
+  delete properties_;
+  properties_ = properties;
+  if (properties) {
+    set_has_properties();
+  } else {
+    clear_has_properties();
+  }
+}
+
+// optional bool support_complex_types = 6 [default = false];
+inline bool UserToBitHandshake::has_support_complex_types() const {
+  return (_has_bits_[0] & 0x00000020u) != 0;
+}
+inline void UserToBitHandshake::set_has_support_complex_types() {
+  _has_bits_[0] |= 0x00000020u;
+}
+inline void UserToBitHandshake::clear_has_support_complex_types() {
+  _has_bits_[0] &= ~0x00000020u;
+}
+inline void UserToBitHandshake::clear_support_complex_types() {
+  support_complex_types_ = false;
+  clear_has_support_complex_types();
+}
+inline bool UserToBitHandshake::support_complex_types() const {
+  return support_complex_types_;
+}
+inline void UserToBitHandshake::set_support_complex_types(bool value) {
+  set_has_support_complex_types();
+  support_complex_types_ = value;
+}
+
+// optional bool support_timeout = 7 [default = false];
+inline bool UserToBitHandshake::has_support_timeout() const {
+  return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void UserToBitHandshake::set_has_support_timeout() {
+  _has_bits_[0] |= 0x00000040u;
+}
+inline void UserToBitHandshake::clear_has_support_timeout() {
+  _has_bits_[0] &= ~0x00000040u;
+}
+inline void UserToBitHandshake::clear_support_timeout() {
+  support_timeout_ = false;
+  clear_has_support_timeout();
+}
+inline bool UserToBitHandshake::support_timeout() const {
+  return support_timeout_;
+}
+inline void UserToBitHandshake::set_support_timeout(bool value) {
+  set_has_support_timeout();
+  support_timeout_ = value;
+}
+
+// optional .exec.user.RpcEndpointInfos client_infos = 8;
+inline bool UserToBitHandshake::has_client_infos() const {
+  return (_has_bits_[0] & 0x00000080u) != 0;
+}
+inline void UserToBitHandshake::set_has_client_infos() {
+  _has_bits_[0] |= 0x00000080u;
+}
+inline void UserToBitHandshake::clear_has_client_infos() {
+  _has_bits_[0] &= ~0x00000080u;
+}
+inline void UserToBitHandshake::clear_client_infos() {
+  if (client_infos_ != NULL) client_infos_->::exec::user::RpcEndpointInfos::Clear();
+  clear_has_client_infos();
+}
+inline const ::exec::user::RpcEndpointInfos& UserToBitHandshake::client_infos() const {
+  return client_infos_ != NULL ? *client_infos_ : *default_instance_->client_infos_;
+}
+inline ::exec::user::RpcEndpointInfos* UserToBitHandshake::mutable_client_infos() {
+  set_has_client_infos();
+  if (client_infos_ == NULL) client_infos_ = new ::exec::user::RpcEndpointInfos;
+  return client_infos_;
+}
+inline ::exec::user::RpcEndpointInfos* UserToBitHandshake::release_client_infos() {
+  clear_has_client_infos();
+  ::exec::user::RpcEndpointInfos* temp = client_infos_;
+  client_infos_ = NULL;
+  return temp;
+}
+inline void UserToBitHandshake::set_allocated_client_infos(::exec::user::RpcEndpointInfos* client_infos) {
+  delete client_infos_;
+  client_infos_ = client_infos;
+  if (client_infos) {
+    set_has_client_infos();
+  } else {
+    clear_has_client_infos();
+  }
+}
+
+// optional .exec.user.SaslSupport sasl_support = 9;
+inline bool UserToBitHandshake::has_sasl_support() const {
+  return (_has_bits_[0] & 0x00000100u) != 0;
+}
+inline void UserToBitHandshake::set_has_sasl_support() {
+  _has_bits_[0] |= 0x00000100u;
+}
+inline void UserToBitHandshake::clear_has_sasl_support() {
+  _has_bits_[0] &= ~0x00000100u;
+}
+inline void UserToBitHandshake::clear_sasl_support() {
+  sasl_support_ = 0;
+  clear_has_sasl_support();
+}
+inline ::exec::user::SaslSupport UserToBitHandshake::sasl_support() const {
+  return static_cast< ::exec::user::SaslSupport >(sasl_support_);
+}
+inline void UserToBitHandshake::set_sasl_support(::exec::user::SaslSupport value) {
+  assert(::exec::user::SaslSupport_IsValid(value));
+  set_has_sasl_support();
+  sasl_support_ = value;
+}
+
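
Putting the handshake accessors together, a sketch of how a client might populate UserToBitHandshake before connecting (the rpc version, client name, and version numbers are placeholders, not the values the Drill client actually sends):

    #include "User.pb.h"

    exec::user::UserToBitHandshake make_handshake() {
      exec::user::UserToBitHandshake handshake;
      handshake.set_channel(exec::shared::USER);  // USER is the declared default
      handshake.set_rpc_version(5);               // placeholder protocol version
      handshake.set_support_complex_types(true);
      exec::user::RpcEndpointInfos* infos = handshake.mutable_client_infos();
      infos->set_name("example-client");          // placeholder client name
      infos->set_majorversion(1);                 // placeholder version numbers
      infos->set_minorversion(0);
      return handshake;
    }
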
+// -------------------------------------------------------------------
+
+// RequestResults
+
+// optional .exec.shared.QueryId query_id = 1;
+inline bool RequestResults::has_query_id() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void RequestResults::set_has_query_id() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void RequestResults::clear_has_query_id() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void RequestResults::clear_query_id() {
+  if (query_id_ != NULL) query_id_->::exec::shared::QueryId::Clear();
+  clear_has_query_id();
+}
+inline const ::exec::shared::QueryId& RequestResults::query_id() const {
+  return query_id_ != NULL ? *query_id_ : *default_instance_->query_id_;
+}
+inline ::exec::shared::QueryId* RequestResults::mutable_query_id() {
+  set_has_query_id();
+  if (query_id_ == NULL) query_id_ = new ::exec::shared::QueryId;
+  return query_id_;
+}
+inline ::exec::shared::QueryId* RequestResults::release_query_id() {
+  clear_has_query_id();
+  ::exec::shared::QueryId* temp = query_id_;
+  query_id_ = NULL;
+  return temp;
+}
+inline void RequestResults::set_allocated_query_id(::exec::shared::QueryId* query_id) {
+  delete query_id_;
+  query_id_ = query_id;
+  if (query_id) {
+    set_has_query_id();
+  } else {
+    clear_has_query_id();
+  }
+}
+
+// optional int32 maximum_responses = 2;
+inline bool RequestResults::has_maximum_responses() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void RequestResults::set_has_maximum_responses() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void RequestResults::clear_has_maximum_responses() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void RequestResults::clear_maximum_responses() {
+  maximum_responses_ = 0;
+  clear_has_maximum_responses();
+}
+inline ::google::protobuf::int32 RequestResults::maximu

<TRUNCATED>

[20/27] drill git commit: DRILL-5304: Queries fail intermittently when there is skew in data distribution

Posted by jn...@apache.org.
DRILL-5304: Queries fail intermittently when there is skew in data distribution

close #766


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/69de3a1e
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/69de3a1e
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/69de3a1e

Branch: refs/heads/master
Commit: 69de3a1e409bb1fb9a25e679ce1750d9f9daf238
Parents: 974c613
Author: Padma Penumarthy <pp...@yahoo.com>
Authored: Mon Feb 27 18:32:24 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:15:34 2017 -0800

----------------------------------------------------------------------
 .../SoftAffinityFragmentParallelizer.java       |  2 +-
 .../exec/store/schedule/AssignmentCreator.java  | 28 +++++++++++++-------
 2 files changed, 19 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/69de3a1e/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SoftAffinityFragmentParallelizer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SoftAffinityFragmentParallelizer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SoftAffinityFragmentParallelizer.java
index 1ebed86..644263e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SoftAffinityFragmentParallelizer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/fragment/SoftAffinityFragmentParallelizer.java
@@ -117,7 +117,7 @@ public class SoftAffinityFragmentParallelizer implements FragmentParallelizer {
 
       // Find the maximum number of slots which should go to endpoints with affinity (See DRILL-825 for details)
       int affinedSlots =
-          Math.max(1, (int) (parameters.getAffinityFactor() * width / activeEndpoints.size())) * sortedAffinityList.size();
+          Math.max(1, (int) (Math.ceil((double)parameters.getAffinityFactor() * width / activeEndpoints.size()) * sortedAffinityList.size()));
 
       // Make sure affined slots is at least the number of mandatory nodes
       affinedSlots = Math.max(affinedSlots, numRequiredNodes);
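
A worked example of the formula change (numbers chosen for illustration): with affinityFactor = 1.2, width = 10, 5 active endpoints, and 3 endpoints in sortedAffinityList, the old expression gives Math.max(1, (int)(1.2 * 10 / 5)) * 3 = 2 * 3 = 6 affined slots, silently truncating 2.4 down to 2, while the new expression gives Math.max(1, (int)(Math.ceil(2.4) * 3)) = 9, so integer truncation no longer starves the endpoints with affinity when the division is not exact.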

http://git-wip-us.apache.org/repos/asf/drill/blob/69de3a1e/exec/java-exec/src/main/java/org/apache/drill/exec/store/schedule/AssignmentCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/schedule/AssignmentCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/schedule/AssignmentCreator.java
index aeaf4bf..198d1ac 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/schedule/AssignmentCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/schedule/AssignmentCreator.java
@@ -106,13 +106,16 @@ public class AssignmentCreator<T extends CompleteWork> {
     LinkedList<WorkEndpointListPair<T>> unassignedWorkList;
     Map<DrillbitEndpoint,FragIteratorWrapper> endpointIterators = getEndpointIterators();
 
-    // Assign upto minCount per node based on locality.
-    unassignedWorkList = assign(workList, endpointIterators, true);
     // Assign upto maxCount per node based on locality.
-    unassignedWorkList = assign(unassignedWorkList, endpointIterators, false);
+    unassignedWorkList = assign(workList, endpointIterators, false);
+
     // Assign upto minCount per node in a round robin fashion.
     assignLeftovers(unassignedWorkList, endpointIterators, true);
-    // Assign upto maxCount per node in a round robin fashion.
+
+    // Assign upto maxCount + leftovers per node based on locality.
+    unassignedWorkList = assign(unassignedWorkList, endpointIterators,  true);
+
+    // Assign upto maxCount + leftovers per node in a round robin fashion.
     assignLeftovers(unassignedWorkList, endpointIterators, false);
 
     if (unassignedWorkList.size() != 0) {
@@ -127,10 +130,12 @@ public class AssignmentCreator<T extends CompleteWork> {
    *
    * @param workList the list of work units to assign
    * @param endpointIterators the endpointIterators to assign to
-   * @param assignMinimum whether to assign only up to the minimum required
+   * @param assignMaxLeftOvers whether to assign upto maximum including leftovers
    * @return a list of unassigned work units
    */
-  private LinkedList<WorkEndpointListPair<T>> assign(List<WorkEndpointListPair<T>> workList, Map<DrillbitEndpoint,FragIteratorWrapper> endpointIterators, boolean assignMinimum) {
+  private LinkedList<WorkEndpointListPair<T>> assign(List<WorkEndpointListPair<T>> workList,
+                                                     Map<DrillbitEndpoint,FragIteratorWrapper> endpointIterators,
+                                                     boolean assignMaxLeftOvers) {
     LinkedList<WorkEndpointListPair<T>> currentUnassignedList = Lists.newLinkedList();
     outer: for (WorkEndpointListPair<T> workPair : workList) {
       List<DrillbitEndpoint> endpoints = workPair.sortedEndpoints;
@@ -139,7 +144,7 @@ public class AssignmentCreator<T extends CompleteWork> {
         if (iteratorWrapper == null) {
           continue;
         }
-        if (iteratorWrapper.count < (assignMinimum ? iteratorWrapper.minCount : iteratorWrapper.maxCount)) {
+        if (iteratorWrapper.count < (assignMaxLeftOvers ? (iteratorWrapper.maxCount + iteratorWrapper.maxCountLeftOver) : iteratorWrapper.maxCount)) {
           Integer assignment = iteratorWrapper.iter.next();
           iteratorWrapper.count++;
           mappings.put(assignment, workPair.work);
@@ -157,9 +162,11 @@ public class AssignmentCreator<T extends CompleteWork> {
    * @param endpointIterators the endpointIterators to assign to
    * @param assignMinimum wheterh to assign the minimum amount
    */
-  private void assignLeftovers(LinkedList<WorkEndpointListPair<T>> unassignedWorkList, Map<DrillbitEndpoint,FragIteratorWrapper> endpointIterators, boolean assignMinimum) {
+  private void assignLeftovers(LinkedList<WorkEndpointListPair<T>> unassignedWorkList,
+                               Map<DrillbitEndpoint,FragIteratorWrapper> endpointIterators,
+                               boolean assignMinimum) {
     outer: for (FragIteratorWrapper iteratorWrapper : endpointIterators.values()) {
-      while (iteratorWrapper.count < (assignMinimum ? iteratorWrapper.minCount : iteratorWrapper.maxCount)) {
+      while (iteratorWrapper.count < (assignMinimum ? iteratorWrapper.minCount : (iteratorWrapper.maxCount + iteratorWrapper.maxCountLeftOver))) {
         WorkEndpointListPair<T> workPair = unassignedWorkList.poll();
         if (workPair == null) {
           break outer;
@@ -261,7 +268,7 @@ public class AssignmentCreator<T extends CompleteWork> {
     while (totalMaxCount < units.size()) {
       for (Entry<DrillbitEndpoint, FragIteratorWrapper> entry : map.entrySet()) {
         FragIteratorWrapper iteratorWrapper = entry.getValue();
-        iteratorWrapper.maxCount++;
+        iteratorWrapper.maxCountLeftOver++;
         totalMaxCount++;
         if (totalMaxCount == units.size()) {
           break;
@@ -279,6 +286,7 @@ public class AssignmentCreator<T extends CompleteWork> {
   private static class FragIteratorWrapper {
     int count = 0;
     int maxCount;
+    int maxCountLeftOver;
     int minCount;
     Iterator<Integer> iter;
   }
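
A condensed sketch of the reordered phases (method names from the patch; list bookkeeping elided) may help when reading the diff above:

    // 1. Locality first, up to each endpoint's maxCount.
    unassignedWorkList = assign(workList, endpointIterators, false);
    // 2. Round-robin up to minCount, so every endpoint gets a floor of work.
    assignLeftovers(unassignedWorkList, endpointIterators, true);
    // 3. Locality again, now allowed to dip into maxCountLeftOver.
    unassignedWorkList = assign(unassignedWorkList, endpointIterators, true);
    // 4. Round-robin for anything still unassigned, with the same expanded cap.
    assignLeftovers(unassignedWorkList, endpointIterators, false);

Keeping the leftover budget in the separate maxCountLeftOver field, rather than bumping maxCount itself, is what lets the first two phases respect the original caps.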


[25/27] drill git commit: DRILL-5034: Select timestamp from hive generated parquet always return in UTC

Posted by jn...@apache.org.
DRILL-5034: Select timestamp from hive generated parquet always return in UTC

- TIMESTAMP_IMPALA function is reverted to retain the local timezone
- TIMESTAMP_IMPALA_LOCALTIMEZONE is deleted
- Retain the local timezone for INT96 timestamp values in parquet files while
  the PARQUET_READER_INT96_AS_TIMESTAMP option is on

Minor changes according to the review

Fix for the test, which relies on a particular timezone

close #656


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/4a0fd56c
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/4a0fd56c
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/4a0fd56c

Branch: refs/heads/master
Commit: 4a0fd56c106550eee26ca68eaed6108f0dbad798
Parents: 8cded5a
Author: Vitalii Diravka <vi...@gmail.com>
Authored: Mon Nov 14 21:13:28 2016 +0000
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Thu Mar 2 10:37:23 2017 -0800

----------------------------------------------------------------------
 ...onvertHiveParquetScanToDrillParquetScan.java |  2 +-
 .../impl/conv/ConvertFromImpalaTimestamp.java   | 28 +++----------------
 .../store/parquet/ParquetReaderUtility.java     | 25 +++++++++++++----
 .../NullableFixedByteAlignedReaders.java        | 19 +++++--------
 .../parquet2/DrillParquetGroupConverter.java    |  2 +-
 .../test/java/org/apache/drill/TestBuilder.java | 15 ++++++++--
 .../physical/impl/writer/TestParquetWriter.java | 29 +++++++++++---------
 7 files changed, 61 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/4a0fd56c/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
index 46c0ff0..bb59600 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
@@ -68,7 +68,7 @@ public class ConvertHiveParquetScanToDrillParquetScan extends StoragePluginOptim
   public static final ConvertHiveParquetScanToDrillParquetScan INSTANCE = new ConvertHiveParquetScanToDrillParquetScan();
 
   private static final DrillSqlOperator INT96_TO_TIMESTAMP =
-      new DrillSqlOperator("convert_fromTIMESTAMP_IMPALA_LOCALTIMEZONE", 1, true, false);
+      new DrillSqlOperator("convert_fromTIMESTAMP_IMPALA", 1, true, false);
 
   private static final DrillSqlOperator RTRIM = new DrillSqlOperator("RTRIM", 1, true, false);
 

http://git-wip-us.apache.org/repos/asf/drill/blob/4a0fd56c/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/conv/ConvertFromImpalaTimestamp.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/conv/ConvertFromImpalaTimestamp.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/conv/ConvertFromImpalaTimestamp.java
index 38e0514..4d3d46b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/conv/ConvertFromImpalaTimestamp.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/conv/ConvertFromImpalaTimestamp.java
@@ -28,29 +28,6 @@ import org.apache.drill.exec.expr.holders.VarBinaryHolder;
 public class ConvertFromImpalaTimestamp {
 
 
-  @FunctionTemplate(name = "convert_fromTIMESTAMP_IMPALA_LOCALTIMEZONE", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = FunctionTemplate.NullHandling.NULL_IF_NULL)
-  public static class ImpalaTimestampConvertFromWithLocalTimezone implements DrillSimpleFunc {
-
-    @Param VarBinaryHolder in;
-    @Output TimeStampHolder out;
-
-
-    @Override
-    public void setup() { }
-
-    @Override
-    public void eval() {
-      org.apache.drill.exec.util.ByteBufUtil.checkBufferLength(in.buffer, in.start, in.end, 12);
-
-      in.buffer.readerIndex(in.start);
-      long nanosOfDay = in.buffer.readLong();
-      int julianDay = in.buffer.readInt();
-      long dateTime = (julianDay - org.apache.drill.exec.store.parquet.ParquetReaderUtility.JULIAN_DAY_NUMBER_FOR_UNIX_EPOCH) *
-          org.joda.time.DateTimeConstants.MILLIS_PER_DAY + (nanosOfDay / org.apache.drill.exec.store.parquet.ParquetReaderUtility.NanoTimeUtils.NANOS_PER_MILLISECOND);
-      out.value = new org.joda.time.DateTime(dateTime, org.joda.time.chrono.JulianChronology.getInstance()).withZoneRetainFields(org.joda.time.DateTimeZone.UTC).getMillis();
-    }
-  }
-
   @FunctionTemplate(name = "convert_fromTIMESTAMP_IMPALA", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = FunctionTemplate.NullHandling.NULL_IF_NULL)
   public static class ImpalaTimestampConvertFrom implements DrillSimpleFunc {
 
@@ -68,8 +45,11 @@ public class ConvertFromImpalaTimestamp {
       in.buffer.readerIndex(in.start);
       long nanosOfDay = in.buffer.readLong();
       int julianDay = in.buffer.readInt();
-      out.value = (julianDay - org.apache.drill.exec.store.parquet.ParquetReaderUtility.JULIAN_DAY_NUMBER_FOR_UNIX_EPOCH) *
+      long dateTime = (julianDay - org.apache.drill.exec.store.parquet.ParquetReaderUtility.JULIAN_DAY_NUMBER_FOR_UNIX_EPOCH) *
           org.joda.time.DateTimeConstants.MILLIS_PER_DAY + (nanosOfDay / org.apache.drill.exec.store.parquet.ParquetReaderUtility.NanoTimeUtils.NANOS_PER_MILLISECOND);
+      /* Note: This function uses local timezone for drill backward compatibility
+               and to avoid issues while reading hive parquet files */
+      out.value = org.joda.time.DateTimeZone.getDefault().convertUTCToLocal(dateTime);
     }
   }
 }
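
As a standalone illustration of the 12-byte INT96 value this function decodes (8 bytes of nanos-of-day followed by a 4-byte Julian day, little-endian), here is a sketch using plain java.nio; the constants are written out literally, whereas the real code takes them from ParquetReaderUtility:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    static long int96ToMillisUtc(byte[] int96) {
      ByteBuffer buf = ByteBuffer.wrap(int96).order(ByteOrder.LITTLE_ENDIAN);
      long nanosOfDay = buf.getLong();      // bytes 0..7
      int julianDay = buf.getInt();         // bytes 8..11
      long julianEpoch = 2440588L;          // Julian day number of 1970-01-01
      return (julianDay - julianEpoch) * 86400000L  // millis per day
          + nanosOfDay / 1000000L;                  // nanos -> millis
    }

The patched eval() then applies DateTimeZone.getDefault().convertUTCToLocal(...) to that result, shifting it into the reader's local zone for compatibility with what Hive wrote.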

http://git-wip-us.apache.org/repos/asf/drill/blob/4a0fd56c/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
index a94e220..4247d41 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
@@ -20,6 +20,7 @@ package org.apache.drill.exec.store.parquet;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.expression.PathSegment;
 import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.expr.holders.NullableTimeStampHolder;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.AbstractRecordReader;
@@ -41,6 +42,7 @@ import org.joda.time.Chronology;
 import org.joda.time.DateTimeConstants;
 import org.apache.parquet.example.data.simple.NanoTime;
 import org.apache.parquet.io.api.Binary;
+import org.joda.time.DateTimeZone;
 
 import java.util.Arrays;
 import java.util.HashMap;
@@ -323,18 +325,29 @@ public class ParquetReaderUtility {
    * @param binaryTimeStampValue
    *          hive, impala timestamp values with nanoseconds precision
    *          are stored in parquet Binary as INT96 (12 constant bytes)
-   *
-   * @return  Unix Timestamp - the number of milliseconds since January 1, 1970, 00:00:00 GMT
-   *          represented by @param binaryTimeStampValue .
+   * @param retainLocalTimezone
+   *          parquet files don't keep local timeZone according to the
+   *          <a href="https://github.com/Parquet/parquet-format/blob/master/LogicalTypes.md#timestamp">Parquet spec</a>,
+   *          but some tools (hive, for example) retain local timezone for parquet files by default
+   *          Note: Impala doesn't retain local timezone by default
+   * @return  Timestamp in milliseconds - the number of milliseconds since January 1, 1970, 00:00:00 GMT
+   *          represented by @param binaryTimeStampValue.
+   *          The nanos precision is cut to millis. Therefore the length of single timestamp value is
+   *          {@value NullableTimeStampHolder#WIDTH} bytes instead of 12 bytes.
    */
-    public static long getDateTimeValueFromBinary(Binary binaryTimeStampValue) {
+    public static long getDateTimeValueFromBinary(Binary binaryTimeStampValue, boolean retainLocalTimezone) {
       // This method represents binaryTimeStampValue as ByteBuffer, where timestamp is stored as sum of
-      // julian day number (32-bit) and nanos of day (64-bit)
+      // julian day number (4 bytes) and nanos of day (8 bytes)
       NanoTime nt = NanoTime.fromBinary(binaryTimeStampValue);
       int julianDay = nt.getJulianDay();
       long nanosOfDay = nt.getTimeOfDayNanos();
-      return (julianDay - JULIAN_DAY_NUMBER_FOR_UNIX_EPOCH) * DateTimeConstants.MILLIS_PER_DAY
+      long dateTime = (julianDay - JULIAN_DAY_NUMBER_FOR_UNIX_EPOCH) * DateTimeConstants.MILLIS_PER_DAY
           + nanosOfDay / NANOS_PER_MILLISECOND;
+      if (retainLocalTimezone) {
+        return DateTimeZone.getDefault().convertUTCToLocal(dateTime);
+      } else {
+        return dateTime;
+      }
     }
   }
 }
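
Concretely, assuming for illustration a JVM default zone of America/Los_Angeles, the two branches differ as follows; the shifted rendering matches the new TestParquetWriter baseline further down:

    import org.joda.time.DateTimeZone;
    import org.joda.time.format.DateTimeFormat;

    long utc = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss")
        .withZoneUTC()
        .parseMillis("2013-07-06 00:01:00");

    // retainLocalTimezone == false: the raw UTC instant is returned as-is.
    // retainLocalTimezone == true: the instant is shifted by the default
    // zone's offset (UTC-7 on this date), rendering as 2013-07-05 17:01:00.
    long shifted = DateTimeZone.forID("America/Los_Angeles").convertUTCToLocal(utc);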

http://git-wip-us.apache.org/repos/asf/drill/blob/4a0fd56c/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableFixedByteAlignedReaders.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableFixedByteAlignedReaders.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableFixedByteAlignedReaders.java
index b233a65..759b0f2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableFixedByteAlignedReaders.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableFixedByteAlignedReaders.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.exec.expr.holders.NullableDecimal28SparseHolder;
 import org.apache.drill.exec.expr.holders.NullableDecimal38SparseHolder;
+import org.apache.drill.exec.expr.holders.NullableTimeStampHolder;
 import org.apache.drill.exec.store.parquet.ParquetReaderUtility;
 import org.apache.drill.exec.util.DecimalUtility;
 import org.apache.drill.exec.vector.NullableBigIntVector;
@@ -110,14 +111,9 @@ public class NullableFixedByteAlignedReaders {
 
   /**
    * Class for reading parquet fixed binary type INT96, which is used for storing hive,
-   * impala timestamp values with nanoseconds precision (12 bytes). So it reads such values as a drill timestamp (8 bytes).
+   * impala timestamp values with nanoseconds precision (12 bytes). It reads such values as a drill timestamp (8 bytes).
    */
   static class NullableFixedBinaryAsTimeStampReader extends NullableFixedByteAlignedReader<NullableTimeStampVector> {
-    /**
-     * The width of each element of the TimeStampVector is 8 byte(s).
-     */
-    private static final int TIMESTAMP_LENGTH_IN_BITS = 64;
-
     NullableFixedBinaryAsTimeStampReader(ParquetRecordReader parentReader, int allocateSize, ColumnDescriptor descriptor,
                               ColumnChunkMetaData columnChunkMetaData, boolean fixedLength, NullableTimeStampVector v, SchemaElement schemaElement) throws ExecutionSetupException {
       super(parentReader, allocateSize, descriptor, columnChunkMetaData, fixedLength, v, schemaElement);
@@ -127,19 +123,18 @@ public class NullableFixedByteAlignedReaders {
     protected void readField(long recordsToReadInThisPass) {
       this.bytebuf = pageReader.pageData;
       if (usingDictionary) {
-        for (int i = 0; i < recordsToReadInThisPass; i++){
+        for (int i = 0; i < recordsToReadInThisPass; i++) {
           Binary binaryTimeStampValue = pageReader.dictionaryValueReader.readBytes();
-          valueVec.getMutator().setSafe(valuesReadInCurrentPass + i, getDateTimeValueFromBinary(binaryTimeStampValue));
+          valueVec.getMutator().setSafe(valuesReadInCurrentPass + i, getDateTimeValueFromBinary(binaryTimeStampValue, true));
         }
       } else {
         for (int i = 0; i < recordsToReadInThisPass; i++) {
           Binary binaryTimeStampValue = pageReader.valueReader.readBytes();
-          valueVec.getMutator().setSafe(valuesReadInCurrentPass + i, getDateTimeValueFromBinary(binaryTimeStampValue));
+          valueVec.getMutator().setSafe(valuesReadInCurrentPass + i, getDateTimeValueFromBinary(binaryTimeStampValue, true));
         }
       }
-      // The nanos precision is cut to millis. Therefore the length of single timestamp value is 8 bytes(s)
-      // instead of 12 byte(s).
-      dataTypeLengthInBits = TIMESTAMP_LENGTH_IN_BITS;
+      // The width of each element of the TimeStampVector is 8 bytes (64 bits) instead of 12 bytes.
+      dataTypeLengthInBits = NullableTimeStampHolder.WIDTH * 8;
     }
   }
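
Spelling out the replaced constant (NullableTimeStampHolder.WIDTH is the byte width of one timestamp value, 8 bytes, stated here as an assumption of this sketch):

    int dataTypeLengthInBits = 8 * 8;  // 64 bits per vector element,
                                       // though 12 bytes are read per INT96 value

Deriving the 64 from the holder's WIDTH rather than a private TIMESTAMP_LENGTH_IN_BITS constant keeps the reader consistent with the vector layout if it ever changes.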
 

http://git-wip-us.apache.org/repos/asf/drill/blob/4a0fd56c/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
index 2f2db05..79dc740 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
@@ -646,7 +646,7 @@ public class DrillParquetGroupConverter extends GroupConverter {
 
     @Override
     public void addBinary(Binary value) {
-      holder.value = getDateTimeValueFromBinary(value);
+      holder.value = getDateTimeValueFromBinary(value, true);
       writer.write(holder);
     }
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/4a0fd56c/exec/java-exec/src/test/java/org/apache/drill/TestBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestBuilder.java b/exec/java-exec/src/test/java/org/apache/drill/TestBuilder.java
index 619959b..36a713f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestBuilder.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestBuilder.java
@@ -14,12 +14,13 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
- ******************************************************************************/
+ */
 package org.apache.drill;
 
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
+import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -35,7 +36,6 @@ import org.apache.drill.common.expression.parser.ExprLexer;
 import org.apache.drill.common.expression.parser.ExprParser;
 import org.apache.drill.common.types.TypeProtos;
 import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.memory.BufferAllocator;
 import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.proto.UserBitShared.QueryType;
 import org.apache.drill.exec.proto.UserProtos.PreparedStatementHandle;
@@ -45,6 +45,7 @@ import org.apache.drill.exec.util.Text;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import org.joda.time.DateTimeZone;
 
 public class TestBuilder {
 
@@ -665,4 +666,14 @@ public class TestBuilder {
     }
     return map;
   }
+
+  /**
+   * Helper method for the timestamp values that depend on the local timezone
+   * @param value expected timestamp value in UTC
+   * @return timestamp value for the local timezone
+   */
+  public static Timestamp convertToLocalTimestamp(String value) {
+    long UTCTimestamp = Timestamp.valueOf(value).getTime();
+    return new Timestamp(DateTimeZone.getDefault().convertUTCToLocal(UTCTimestamp));
+  }
 }
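
A typical call site, mirroring how the TestParquetWriter changes below consume this helper (the literal is illustrative):

    // The baseline is written as UTC in the test source; the helper shifts
    // it into whatever zone the test JVM happens to run in.
    java.sql.Timestamp local = TestBuilder.convertToLocalTimestamp("2015-07-18 13:52:51");
    org.joda.time.DateTime baseline = new org.joda.time.DateTime(local);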

http://git-wip-us.apache.org/repos/asf/drill/blob/4a0fd56c/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/writer/TestParquetWriter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/writer/TestParquetWriter.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/writer/TestParquetWriter.java
index 362c943..65e9c38 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/writer/TestParquetWriter.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/writer/TestParquetWriter.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,6 +18,7 @@
 package org.apache.drill.exec.physical.impl.writer;
 
 import static org.apache.drill.exec.store.parquet.ParquetRecordWriter.DRILL_VERSION_PROPERTY;
+import static org.apache.drill.TestBuilder.convertToLocalTimestamp;
 import static org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_ROW_GROUPS;
 import static org.junit.Assert.assertEquals;
 
@@ -25,7 +26,6 @@ import java.io.File;
 import java.io.FileWriter;
 import java.math.BigDecimal;
 import java.sql.Date;
-import java.sql.Timestamp;
 import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -45,8 +45,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.log4j.Level;
-import org.apache.parquet.Log;
 import org.apache.parquet.hadoop.ParquetFileReader;
 import org.apache.parquet.hadoop.metadata.ParquetMetadata;
 import org.joda.time.DateTime;
@@ -57,7 +55,6 @@ import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -759,7 +756,7 @@ public class TestParquetWriter extends BaseTestQuery {
       compareParquetReadersColumnar("field_impala_ts", "cp.`parquet/int96_impala_1.parquet`");
     } finally {
       test("alter session reset %s", ExecConstants.PARQUET_READER_INT96_AS_TIMESTAMP);
-  }
+    }
   }
 
   /*
@@ -774,6 +771,7 @@ public class TestParquetWriter extends BaseTestQuery {
   Test the reading of a binary field as drill timestamp where data is in dicationary _and_ non-dictionary encoded pages
    */
   @Test
+  @Ignore("relies on particular time zone, works for UTC")
   public void testImpalaParquetBinaryAsTimeStamp_DictChange() throws Exception {
     final String WORKING_PATH = TestTools.getWorkingPath();
     final String TEST_RES_PATH = WORKING_PATH + "/src/test/resources";
@@ -837,6 +835,7 @@ public class TestParquetWriter extends BaseTestQuery {
   Test the conversion from int96 to impala timestamp with hive data including nulls. Validate against expected values
   */
   @Test
+  @Ignore("relies on particular time zone")
   public void testHiveParquetTimestampAsInt96_basic() throws Exception {
     final String q = "SELECT cast(convert_from(timestamp_field, 'TIMESTAMP_IMPALA') as varchar(19))  as timestamp_field "
             + "from cp.`parquet/part1/hive_all_types.parquet` ";
@@ -845,7 +844,7 @@ public class TestParquetWriter extends BaseTestQuery {
             .unOrdered()
             .sqlQuery(q)
             .baselineColumns("timestamp_field")
-            .baselineValues("2013-07-06 00:01:00")
+            .baselineValues("2013-07-05 17:01:00")
             .baselineValues((Object)null)
             .go();
   }
@@ -917,10 +916,11 @@ public class TestParquetWriter extends BaseTestQuery {
     try {
       testBuilder()
           .ordered()
-          .sqlQuery("select `%s` from %s", selection, table)
+          .sqlQuery("select `%1$s` from %2$s order by `%1$s`", selection, table)
           .optionSettingQueriesForTestQuery(
               "alter session set `%s` = true", ExecConstants.PARQUET_READER_INT96_AS_TIMESTAMP)
-          .sqlBaselineQuery("select convert_from(`%1$s`, 'TIMESTAMP_IMPALA') as `%1$s` from %2$s", selection, table)
+          .sqlBaselineQuery("select convert_from(`%1$s`, 'TIMESTAMP_IMPALA') as `%1$s` from %2$s order by `%1$s`",
+              selection, table)
           .optionSettingQueriesForBaseline(
               "alter session set `%s` = false", ExecConstants.PARQUET_READER_INT96_AS_TIMESTAMP)
           .build()
@@ -968,13 +968,16 @@ public class TestParquetWriter extends BaseTestQuery {
   public void testInt96TimeStampValueWidth() throws Exception {
     try {
       testBuilder()
-          .ordered()
-          .sqlQuery("select c, d from cp.`parquet/data.snappy.parquet` where d = '2015-07-18 13:52:51'")
+          .unOrdered()
+          .sqlQuery("select c, d from cp.`parquet/data.snappy.parquet` " +
+              "where `a` is not null and `c` is not null and `d` is not null")
           .optionSettingQueriesForTestQuery(
               "alter session set `%s` = true", ExecConstants.PARQUET_READER_INT96_AS_TIMESTAMP)
           .baselineColumns("c", "d")
-          .baselineValues(new DateTime(Date.valueOf("2011-04-11").getTime()),
-              new DateTime(Timestamp.valueOf("2015-07-18 13:52:51").getTime()))
+          .baselineValues(new DateTime(Date.valueOf("2012-12-15").getTime()),
+              new DateTime(convertToLocalTimestamp("2016-04-24 20:06:28")))
+          .baselineValues(new DateTime(Date.valueOf("2011-07-09").getTime()),
+              new DateTime(convertToLocalTimestamp("2015-04-15 22:35:49")))
           .build()
           .run();
     } finally {


[26/27] drill git commit: DRILL-5290: Provide an option to build operator table once for built-in static functions and reuse it across queries.

Posted by jn...@apache.org.
DRILL-5290: Provide an option to build operator table once for built-in static functions and reuse it across queries.

close #757


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/2b5a6f0b
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/2b5a6f0b
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/2b5a6f0b

Branch: refs/heads/master
Commit: 2b5a6f0b523859297f86298ef384cd146e425f72
Parents: 4a0fd56
Author: Padma Penumarthy <pp...@yahoo.com>
Authored: Wed Feb 22 10:31:01 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Thu Mar 2 10:46:48 2017 -0800

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |  4 ++++
 .../org/apache/drill/exec/ops/QueryContext.java |  9 +++++++-
 .../drill/exec/server/DrillbitContext.java      | 23 ++++++++++++++++++++
 .../server/options/SystemOptionManager.java     |  3 ++-
 4 files changed, 37 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/2b5a6f0b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 60d6265..4f0f4d9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -437,4 +437,8 @@ public interface ExecConstants {
   String QUERY_PROFILE_DEBUG_OPTION = "exec.query_profile.debug_mode";
   BooleanValidator QUERY_PROFILE_DEBUG_VALIDATOR = new BooleanValidator(
       QUERY_PROFILE_DEBUG_OPTION, false, false);
+
+  String USE_DYNAMIC_UDFS_KEY = "exec.udf.use_dynamic";
+  BooleanValidator USE_DYNAMIC_UDFS = new BooleanValidator(USE_DYNAMIC_UDFS_KEY, true);
+
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/2b5a6f0b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
index 707815a..df3f4f4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
@@ -91,7 +91,14 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
     executionControls = new ExecutionControls(queryOptions, drillbitContext.getEndpoint());
     plannerSettings = new PlannerSettings(queryOptions, getFunctionRegistry());
     plannerSettings.setNumEndPoints(drillbitContext.getBits().size());
-    table = new DrillOperatorTable(getFunctionRegistry(), drillbitContext.getOptionManager());
+
+    // If we do not need to support dynamic UDFs for this query, just use static operator table
+    // built at the startup. Else, build new operator table from latest version of function registry.
+    if (queryOptions.getOption(ExecConstants.USE_DYNAMIC_UDFS)) {
+      this.table = new DrillOperatorTable(drillbitContext.getFunctionImplementationRegistry(), drillbitContext.getOptionManager());
+    } else {
+      this.table = drillbitContext.getOperatorTable();
+    }
 
     queryContextInfo = Utilities.createQueryContextInfo(session.getDefaultSchemaPath(), session.getSessionId());
     contextInformation = new ContextInformation(session.getCredentials(), queryContextInfo);
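
Operationally, the cached table is used whenever a session turns the new option off; a hypothetical JDBC snippet (connection URL invented, error handling elided) shows the intended pattern:

    try (java.sql.Connection conn =
             java.sql.DriverManager.getConnection("jdbc:drill:zk=local");
         java.sql.Statement stmt = conn.createStatement()) {
      stmt.execute("ALTER SESSION SET `exec.udf.use_dynamic` = false");
      // Subsequent queries on this session reuse the operator table built
      // at drillbit startup instead of rebuilding it per query.
      stmt.executeQuery("SELECT * FROM sys.version");
    }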

http://git-wip-us.apache.org/repos/asf/drill/blob/2b5a6f0b/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
index 23dc30c..6c68ab2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
@@ -33,6 +33,7 @@ import org.apache.drill.exec.expr.fn.registry.RemoteFunctionRegistry;
 import org.apache.drill.exec.memory.BufferAllocator;
 import org.apache.drill.exec.physical.impl.OperatorCreatorRegistry;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
+import org.apache.drill.exec.planner.sql.DrillOperatorTable;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
 import org.apache.drill.exec.rpc.control.Controller;
 import org.apache.drill.exec.rpc.control.WorkEventBus;
@@ -62,6 +63,8 @@ public class DrillbitContext implements AutoCloseable {
   private final CodeCompiler compiler;
   private final ScanResult classpathScan;
   private final LogicalPlanPersistence lpPersistence;
+  // operator table for standard SQL operators and functions, Drill built-in UDFs
+  private final DrillOperatorTable table;
 
 
   public DrillbitContext(
@@ -91,6 +94,9 @@ public class DrillbitContext implements AutoCloseable {
     this.systemOptions = new SystemOptionManager(lpPersistence, provider);
     this.functionRegistry = new FunctionImplementationRegistry(context.getConfig(), classpathScan, systemOptions);
     this.compiler = new CodeCompiler(context.getConfig(), systemOptions);
+
+    // This operator table is built once and used for all queries which do not need dynamic UDF support.
+    this.table = new DrillOperatorTable(functionRegistry, systemOptions);
   }
 
   public FunctionImplementationRegistry getFunctionImplementationRegistry() {
@@ -189,6 +195,23 @@ public class DrillbitContext implements AutoCloseable {
 
   public RemoteFunctionRegistry getRemoteFunctionRegistry() { return functionRegistry.getRemoteFunctionRegistry(); }
 
+  /**
+   * Use the operator table built during startup when "exec.udf.use_dynamic" option
+   * is set to false.
+   * This operator table has standard SQL functions, operators and drill
+   * built-in user defined functions (UDFs).
+   * It does not include dynamic user defined functions (UDFs) that get added/removed
+   * at run time.
+   * This operator table is meant to be used for high throughput,
+   * low latency operational queries, for which cost of building operator table is
+   * high, both in terms of CPU and heap memory usage.
+   *
+   * @return - Operator table
+   */
+  public DrillOperatorTable getOperatorTable() {
+    return table;
+  }
+
   @Override
   public void close() throws Exception {
     getOptionManager().close();

http://git-wip-us.apache.org/repos/asf/drill/blob/2b5a6f0b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 425131c..4a846c0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -165,7 +165,8 @@ public class SystemOptionManager extends BaseOptionManager implements AutoClosea
       ExecConstants.DYNAMIC_UDF_SUPPORT_ENABLED_VALIDATOR,
       ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED_OPTION,
       ExecConstants.ENABLE_QUERY_PROFILE_VALIDATOR,
-      ExecConstants.QUERY_PROFILE_DEBUG_VALIDATOR
+      ExecConstants.QUERY_PROFILE_DEBUG_VALIDATOR,
+      ExecConstants.USE_DYNAMIC_UDFS
     };
     final Map<String, OptionValidator> tmp = new HashMap<>();
     for (final OptionValidator validator : validators) {


[24/27] drill git commit: DRILL-5266: Parquet returns low-density batches

Posted by jn...@apache.org.
DRILL-5266: Parquet returns low-density batches

Fixes one glaring problem related to bit/byte confusion.

Includes a few clean-up items found along the way.

Additional fixes from code review comments

More code clean up from code review

close #749


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/8cded5ae
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/8cded5ae
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/8cded5ae

Branch: refs/heads/master
Commit: 8cded5ae124db86f42e1274762305ca04b373a51
Parents: 33fc25c
Author: Paul Rogers <pr...@maprtech.com>
Authored: Wed Feb 15 20:51:17 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:46:25 2017 -0800

----------------------------------------------------------------------
 .../parquet/columnreaders/ColumnReader.java     | 42 ++++++---------
 .../columnreaders/FixedWidthRepeatedReader.java |  1 +
 .../NullableVarLengthValuesColumn.java          |  6 +--
 .../columnreaders/ParquetRecordReader.java      | 40 ++++++--------
 .../columnreaders/VarLenBinaryReader.java       | 56 +++++++++++---------
 5 files changed, 66 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/8cded5ae/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
index c45642b..5eaf286 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
@@ -101,8 +101,10 @@ public abstract class ColumnReader<V extends ValueVector> {
     }
     if (columnDescriptor.getType() != PrimitiveType.PrimitiveTypeName.BINARY) {
       if (columnDescriptor.getType() == PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
+        // Here "bits" means "bytes"
         dataTypeLengthInBits = columnDescriptor.getTypeLength() * 8;
       } else {
+        // While here, "bits" means "bits"
         dataTypeLengthInBits = ParquetRecordReader.getTypeLengthInBits(columnDescriptor.getType());
       }
     }
@@ -124,7 +126,7 @@ public abstract class ColumnReader<V extends ValueVector> {
     reset();
     if(recordsToReadInThisPass>0) {
       do {
-        determineSize(recordsToReadInThisPass, 0);
+        determineSize(recordsToReadInThisPass);
 
       } while (valuesReadInCurrentPass < recordsToReadInThisPass && pageReader.hasPage());
     }
@@ -153,9 +155,7 @@ public abstract class ColumnReader<V extends ValueVector> {
           .pushContext("File: ", this.parentReader.getHadoopPath().toString() )
           .build(logger);
       throw ex;
-
     }
-
   }
 
   protected abstract void readField(long recordsToRead);
@@ -170,27 +170,17 @@ public abstract class ColumnReader<V extends ValueVector> {
    * @return - true if we should stop reading
    * @throws IOException
    */
-  public boolean determineSize(long recordsReadInCurrentPass, Integer lengthVarFieldsInCurrentRecord) throws IOException {
-
-    boolean doneReading = readPage();
-    if (doneReading) {
-      return true;
-    }
+  public boolean determineSize(long recordsReadInCurrentPass) throws IOException {
 
-    doneReading = processPageData((int) recordsReadInCurrentPass);
-    if (doneReading) {
+    if (readPage()) {
       return true;
     }
 
-    // Never used in this code path. Hard to remove because the method is overidden by subclasses
-    lengthVarFieldsInCurrentRecord = -1;
-
-    doneReading = checkVectorCapacityReached();
-    if (doneReading) {
+    if (processPageData((int) recordsReadInCurrentPass)) {
       return true;
     }
 
-    return false;
+    return checkVectorCapacityReached();
   }
 
   protected Future<Integer> readRecordsAsync(int recordsToRead){
@@ -264,17 +254,20 @@ public abstract class ColumnReader<V extends ValueVector> {
   protected void hitRowGroupEnd() {}
 
   protected boolean checkVectorCapacityReached() {
+    // Here "bits" means "bytes"
+    // But, inside "capacity", "bits" sometimes means "bits".
+    // Note that bytesReadInCurrentPass is never updated, so this next
+    // line is a no-op.
     if (bytesReadInCurrentPass + dataTypeLengthInBits > capacity()) {
       logger.debug("Reached the capacity of the data vector in a variable length value vector.");
       return true;
     }
-    else if (valuesReadInCurrentPass > valueVec.getValueCapacity()) {
-      return true;
-    }
-    return false;
+    // No op: already checked this earlier and would not be here if this
+    // condition is true.
+    return valuesReadInCurrentPass > valueVec.getValueCapacity();
   }
 
-  // copied out of parquet library, didn't want to deal with the uneeded throws statement they had declared
+  // copied out of Parquet library, didn't want to deal with the uneeded throws statement they had declared
   public static int readIntLittleEndian(DrillBuf in, int offset) {
     int ch4 = in.getByte(offset) & 0xff;
     int ch3 = in.getByte(offset + 1) & 0xff;
@@ -285,7 +278,7 @@ public abstract class ColumnReader<V extends ValueVector> {
 
   private class ColumnReaderProcessPagesTask implements Callable<Long> {
 
-    private final ColumnReader parent = ColumnReader.this;
+    private final ColumnReader<V> parent = ColumnReader.this;
     private final long recordsToReadInThisPass;
 
     public ColumnReaderProcessPagesTask(long recordsToReadInThisPass){
@@ -305,12 +298,11 @@ public abstract class ColumnReader<V extends ValueVector> {
         Thread.currentThread().setName(oldname);
       }
     }
-
   }
 
   private class ColumnReaderReadRecordsTask implements Callable<Integer> {
 
-    private final ColumnReader parent = ColumnReader.this;
+    private final ColumnReader<V> parent = ColumnReader.this;
     private final int recordsToRead;
 
     public ColumnReaderReadRecordsTask(int recordsToRead){
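
One deleted line above deserves a note: the assignment lengthVarFieldsInCurrentRecord = -1 could never reach the caller, because Java passes object references by value. A minimal illustration of that pitfall:

    static void tryToSignal(Integer flag) {
      flag = -1;              // rebinds the local reference only
    }

    Integer caller = 0;
    tryToSignal(caller);
    // caller is still 0, which is why the parameter and its assignment
    // could be removed from determineSize() without changing behavior.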

http://git-wip-us.apache.org/repos/asf/drill/blob/8cded5ae/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
index f70c8d5..6db7110 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
@@ -140,6 +140,7 @@ public class FixedWidthRepeatedReader extends VarLengthColumn<RepeatedValueVecto
     }
   }
 
+  @SuppressWarnings("resource")
   @Override
   protected boolean readAndStoreValueSizeInformation() {
     int numLeftoverVals = 0;

http://git-wip-us.apache.org/repos/asf/drill/blob/8cded5ae/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableVarLengthValuesColumn.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableVarLengthValuesColumn.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableVarLengthValuesColumn.java
index c96064b..3a7a54b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableVarLengthValuesColumn.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/NullableVarLengthValuesColumn.java
@@ -91,12 +91,8 @@ public abstract class NullableVarLengthValuesColumn<V extends ValueVector> exten
       dataTypeLengthInBits = pageReader.pageData.getInt((int) pageReader.readyToReadPosInBytes);
     }
     // I think this also needs to happen if it is null for the random access
-    boolean success = setSafe(valuesReadInCurrentPass + pageReader.valuesReadyToRead, pageReader.pageData,
+    return ! setSafe(valuesReadInCurrentPass + pageReader.valuesReadyToRead, pageReader.pageData,
         (int) pageReader.readyToReadPosInBytes + 4, dataTypeLengthInBits);
-    if ( ! success ) {
-      return true;
-    }
-    return false;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/drill/blob/8cded5ae/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
index 79901ed..93c1214 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
@@ -26,8 +26,6 @@ import java.util.Map;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableList;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.expression.SchemaPath;
@@ -53,16 +51,15 @@ import org.apache.drill.exec.vector.complex.RepeatedValueVector;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.column.ColumnDescriptor;
-import org.apache.parquet.format.FileMetaData;
 import org.apache.parquet.format.SchemaElement;
-import org.apache.parquet.format.converter.ParquetMetadataConverter;
 import org.apache.parquet.hadoop.CodecFactory;
-import org.apache.parquet.hadoop.ParquetFileWriter;
 import org.apache.parquet.hadoop.metadata.BlockMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.hadoop.metadata.ParquetMetadata;
 import org.apache.parquet.schema.PrimitiveType;
 
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
 public class ParquetRecordReader extends AbstractRecordReader {
@@ -72,7 +69,8 @@ public class ParquetRecordReader extends AbstractRecordReader {
   private static final int NUMBER_OF_VECTORS = 1;
   private static final long DEFAULT_BATCH_LENGTH = 256 * 1024 * NUMBER_OF_VECTORS; // 256kb
   private static final long DEFAULT_BATCH_LENGTH_IN_BITS = DEFAULT_BATCH_LENGTH * 8; // 256kb
-  private static final char DEFAULT_RECORDS_TO_READ_IF_NOT_FIXED_WIDTH = 32*1024;
+  private static final char DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH = 32*1024; // 32K
+  private static final int DEFAULT_RECORDS_TO_READ_IF_FIXED_WIDTH = 64*1024 - 1; // 64K - 1, max SV2 can address
   private static final int NUM_RECORDS_TO_READ_NOT_SPECIFIED = -1;
 
   // When no column is required by the downstrea operator, ask SCAN to return a DEFAULT column. If such column does not exist,
@@ -91,12 +89,10 @@ public class ParquetRecordReader extends AbstractRecordReader {
   private boolean allFieldsFixedLength;
   private int recordsPerBatch;
   private OperatorContext operatorContext;
-//  private long totalRecords;
-//  private long rowGroupOffset;
 
   private List<ColumnReader<?>> columnStatuses;
   private FileSystem fileSystem;
-  private long batchSize;
+  private final long batchSize;
   private long numRecordsToRead; // number of records to read
 
   Path hadoopPath;
@@ -128,6 +124,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
   public boolean enforceTotalSize;
   public long readQueueSize;
 
+  @SuppressWarnings("unused")
   private String name;
 
 
@@ -333,6 +330,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
     }
   }
 
+  @SuppressWarnings({ "resource", "unchecked" })
   @Override
   public void setup(OperatorContext operatorContext, OutputMutator output) throws ExecutionSetupException {
     this.operatorContext = operatorContext;
@@ -341,7 +339,6 @@ public class ParquetRecordReader extends AbstractRecordReader {
       nullFilledVectors = new ArrayList<>();
     }
     columnStatuses = new ArrayList<>();
-//    totalRecords = footer.getBlocks().get(rowGroupIndex).getRowCount();
     List<ColumnDescriptor> columns = footer.getFileMetaData().getSchema().getColumns();
     allFieldsFixedLength = true;
     ColumnDescriptor column;
@@ -350,8 +347,6 @@ public class ParquetRecordReader extends AbstractRecordReader {
     mockRecordsRead = 0;
 
     MaterializedField field;
-//    ParquetMetadataConverter metaConverter = new ParquetMetadataConverter();
-    FileMetaData fileMetaData;
 
     logger.debug("Reading row group({}) with {} records in file {}.", rowGroupIndex, footer.getBlocks().get(rowGroupIndex).getRowCount(),
         hadoopPath.toUri().getPath());
@@ -374,19 +369,18 @@ public class ParquetRecordReader extends AbstractRecordReader {
       columnsToScan++;
       int dataTypeLength = getDataTypeLength(column, se);
       if (dataTypeLength == -1) {
-          allFieldsFixedLength = false;
-        } else {
+        allFieldsFixedLength = false;
+      } else {
         bitWidthAllFixedFields += dataTypeLength;
-        }
       }
-//    rowGroupOffset = footer.getBlocks().get(rowGroupIndex).getColumns().get(0).getFirstDataPageOffset();
+    }
 
     if (columnsToScan != 0  && allFieldsFixedLength) {
       recordsPerBatch = (int) Math.min(Math.min(batchSize / bitWidthAllFixedFields,
-          footer.getBlocks().get(0).getColumns().get(0).getValueCount()), 65535);
+          footer.getBlocks().get(0).getColumns().get(0).getValueCount()), DEFAULT_RECORDS_TO_READ_IF_FIXED_WIDTH);
     }
     else {
-      recordsPerBatch = DEFAULT_RECORDS_TO_READ_IF_NOT_FIXED_WIDTH;
+      recordsPerBatch = DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH;
     }
 
     try {
@@ -526,7 +520,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
       futures.add(f);
     }
     Exception exception = null;
-    for(Future f: futures){
+    for(Future<Long> f: futures){
       if(exception != null) {
         f.cancel(true);
       } else {
@@ -567,7 +561,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
           parquetReaderStats.timeProcess.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
           return 0;
         }
-        recordsToRead = Math.min(DEFAULT_RECORDS_TO_READ_IF_NOT_FIXED_WIDTH, footer.getBlocks().get(rowGroupIndex).getRowCount() - mockRecordsRead);
+        recordsToRead = Math.min(DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH, footer.getBlocks().get(rowGroupIndex).getRowCount() - mockRecordsRead);
 
         // Pick the minimum of recordsToRead calculated above and numRecordsToRead (based on rowCount and limit).
         recordsToRead = Math.min(recordsToRead, numRecordsToRead);
@@ -585,7 +579,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
       if (allFieldsFixedLength) {
         recordsToRead = Math.min(recordsPerBatch, firstColumnStatus.columnChunkMetaData.getValueCount() - firstColumnStatus.totalValuesRead);
       } else {
-        recordsToRead = DEFAULT_RECORDS_TO_READ_IF_NOT_FIXED_WIDTH;
+        recordsToRead = DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH;
 
       }
 
@@ -595,7 +589,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
       if (allFieldsFixedLength) {
         readAllFixedFields(recordsToRead);
       } else { // variable length columns
-        long fixedRecordsToRead = varLengthReader.readFields(recordsToRead, firstColumnStatus);
+        long fixedRecordsToRead = varLengthReader.readFields(recordsToRead);
         readAllFixedFields(fixedRecordsToRead);
       }
 
@@ -644,7 +638,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
     codecFactory.release();
 
     if (varLengthReader != null) {
-      for (final VarLengthColumn r : varLengthReader.columns) {
+      for (final VarLengthColumn<?> r : varLengthReader.columns) {
         r.clear();
       }
       varLengthReader.columns.clear();
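
Rough arithmetic behind the fixed-width sizing (a hypothetical single-BIGINT schema; the real code also caps by the row group's value count):

    long batchSize = 256 * 1024 * 8;   // DEFAULT_BATCH_LENGTH_IN_BITS
    int bitWidthAllFixedFields = 64;   // one BIGINT column
    int sv2Cap = 64 * 1024 - 1;        // 65535, the most an SV2 can address

    int recordsPerBatch =
        (int) Math.min(batchSize / bitWidthAllFixedFields, sv2Cap);  // 32768

The old literal 65535 encoded the same cap; naming it after the selection-vector limit documents why it exists.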

http://git-wip-us.apache.org/repos/asf/drill/blob/8cded5ae/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenBinaryReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenBinaryReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenBinaryReader.java
index 9bfc3aa..b598ac8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenBinaryReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/VarLenBinaryReader.java
@@ -33,24 +33,34 @@ public class VarLenBinaryReader {
   ParquetRecordReader parentReader;
   final List<VarLengthColumn<? extends ValueVector>> columns;
   final boolean useAsyncTasks;
+  private final long targetRecordCount;
 
   public VarLenBinaryReader(ParquetRecordReader parentReader, List<VarLengthColumn<? extends ValueVector>> columns) {
     this.parentReader = parentReader;
     this.columns = columns;
     useAsyncTasks = parentReader.useAsyncColReader;
+
+    // Can't read any more records than fixed width fields will fit.
+    // Note: this calculation is very likely wrong; it is a simplified
+    // version of earlier code, but probably needs even more attention.
+
+    int totalFixedFieldWidth = parentReader.getBitWidthAllFixedFields() / 8;
+    if (totalFixedFieldWidth == 0) {
+      targetRecordCount = 0;
+    } else {
+      targetRecordCount = parentReader.getBatchSize() / totalFixedFieldWidth;
+    }
   }
 
   /**
    * Reads as many variable length values as possible.
    *
    * @param recordsToReadInThisPass - the number of records recommended for reading form the reader
-   * @param firstColumnStatus - a reference to the first column status in the parquet file to grab metatdata from
+   * @param firstColumnStatus - a reference to the first column status in the Parquet file to grab metatdata from
    * @return - the number of fixed length fields that will fit in the batch
    * @throws IOException
    */
-  public long readFields(long recordsToReadInThisPass, ColumnReader<?> firstColumnStatus) throws IOException {
-
-    long recordsReadInCurrentPass = 0;
+  public long readFields(long recordsToReadInThisPass) throws IOException {
 
     // write the first 0 offset
     for (VarLengthColumn<?> columnReader : columns) {
@@ -58,10 +68,16 @@ public class VarLenBinaryReader {
     }
     Stopwatch timer = Stopwatch.createStarted();
 
-    recordsReadInCurrentPass = determineSizesSerial(recordsToReadInThisPass);
-    if(useAsyncTasks){
+    // Can't read any more records than fixed width fields will fit.
+
+    if (targetRecordCount > 0) {
+      recordsToReadInThisPass = Math.min(recordsToReadInThisPass, targetRecordCount);
+    }
+    long recordsReadInCurrentPass = determineSizesSerial(recordsToReadInThisPass);
+
+    if(useAsyncTasks) {
       readRecordsParallel(recordsReadInCurrentPass);
-    }else{
+    } else {
       readRecordsSerial(recordsReadInCurrentPass);
     }
 
@@ -70,33 +86,21 @@ public class VarLenBinaryReader {
     return recordsReadInCurrentPass;
   }
 
-
   private long determineSizesSerial(long recordsToReadInThisPass) throws IOException {
-    int lengthVarFieldsInCurrentRecord = 0;
-    boolean exitLengthDeterminingLoop = false;
-    long totalVariableLengthData = 0;
-    long recordsReadInCurrentPass = 0;
-    do {
+
+    int recordsReadInCurrentPass = 0;
+    top: do {
       for (VarLengthColumn<?> columnReader : columns) {
-        if (!exitLengthDeterminingLoop) {
-          exitLengthDeterminingLoop =
-              columnReader.determineSize(recordsReadInCurrentPass, lengthVarFieldsInCurrentRecord);
-        } else {
-          break;
+        // Return status is "done reading", meaning stop if true.
+        if (columnReader.determineSize(recordsReadInCurrentPass)) {
+          break top;
         }
       }
-      // check that the next record will fit in the batch
-      if (exitLengthDeterminingLoop ||
-          (recordsReadInCurrentPass + 1) * parentReader.getBitWidthAllFixedFields()
-              + totalVariableLengthData + lengthVarFieldsInCurrentRecord > parentReader.getBatchSize()) {
-        break;
-      }
       for (VarLengthColumn<?> columnReader : columns) {
         columnReader.updateReadyToReadPosition();
         columnReader.currDefLevel = -1;
       }
       recordsReadInCurrentPass++;
-      totalVariableLengthData += lengthVarFieldsInCurrentRecord;
     } while (recordsReadInCurrentPass < recordsToReadInThisPass);
 
     return recordsReadInCurrentPass;
@@ -118,7 +122,7 @@ public class VarLenBinaryReader {
       futures.add(f);
     }
     Exception exception = null;
-    for(Future f: futures){
+    for(Future<Integer> f: futures){
       if(exception != null) {
         f.cancel(true);
       } else {
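
The new guard, with invented numbers (the in-code comment itself flags the formula as approximate, so treat the units as a sketch):

    long batchSize = 256 * 1024;       // from parentReader.getBatchSize()
    int totalFixedFieldWidth = 16;     // bytes of fixed-width columns per row
    long targetRecordCount =
        totalFixedFieldWidth == 0 ? 0 : batchSize / totalFixedFieldWidth;  // 16384

    // readFields() then clamps the caller's request:
    long recordsToReadInThisPass = Math.min(32 * 1024, targetRecordCount); // 16384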


[23/27] drill git commit: DRILL-5252: Fix a condition that always returns true

Posted by jn...@apache.org.
DRILL-5252: Fix a condition that always returns true

close #745


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/33fc25ca
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/33fc25ca
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/33fc25ca

Branch: refs/heads/master
Commit: 33fc25ca2a4cd93fa40025f167f78655777ab47b
Parents: dcbcb94
Author: jc@lifove.net <jc...@lifove.net>
Authored: Fri Feb 10 21:08:00 2017 -0500
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Wed Mar 1 23:46:24 2017 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/drill/exec/expr/EqualityVisitor.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/33fc25ca/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EqualityVisitor.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EqualityVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EqualityVisitor.java
index 433e95f..5f79f32 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EqualityVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EqualityVisitor.java
@@ -290,7 +290,7 @@ class EqualityVisitor extends AbstractExprVisitor<Boolean,LogicalExpression,Runt
     if (!(value instanceof TypedNullConstant)) {
       return false;
     }
-    return e.getMajorType().equals(e.getMajorType());
+    return value.getMajorType().equals(e.getMajorType());
   }
 
   @Override
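
The one-line fix is easy to miss: the old guard compared e.getMajorType() to itself, so the equality visitor accepted any TypedNullConstant regardless of its type. Reduced to its essentials, with the same variable names as the method above:

    // Before: trivially true, because the expression's type is compared to itself.
    return e.getMajorType().equals(e.getMajorType());

    // After: the null constant's type is compared to the visited expression's type.
    return value.getMajorType().equals(e.getMajorType());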


[07/27] drill git commit: DRILL-5301: Add C++ client support for Server metadata API

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/d3238b1b/contrib/native/client/src/protobuf/User.pb.cc
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/User.pb.cc b/contrib/native/client/src/protobuf/User.pb.cc
index aee70b8..be3f001 100644
--- a/contrib/native/client/src/protobuf/User.pb.cc
+++ b/contrib/native/client/src/protobuf/User.pb.cc
@@ -99,6 +99,18 @@ const ::google::protobuf::internal::GeneratedMessageReflection*
 const ::google::protobuf::Descriptor* CreatePreparedStatementResp_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   CreatePreparedStatementResp_reflection_ = NULL;
+const ::google::protobuf::Descriptor* GetServerMetaReq_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  GetServerMetaReq_reflection_ = NULL;
+const ::google::protobuf::Descriptor* ConvertSupport_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  ConvertSupport_reflection_ = NULL;
+const ::google::protobuf::Descriptor* GetServerMetaResp_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  GetServerMetaResp_reflection_ = NULL;
+const ::google::protobuf::Descriptor* ServerMeta_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  ServerMeta_reflection_ = NULL;
 const ::google::protobuf::Descriptor* RunQuery_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   RunQuery_reflection_ = NULL;
@@ -109,6 +121,16 @@ const ::google::protobuf::EnumDescriptor* HandshakeStatus_descriptor_ = NULL;
 const ::google::protobuf::EnumDescriptor* RequestStatus_descriptor_ = NULL;
 const ::google::protobuf::EnumDescriptor* ColumnSearchability_descriptor_ = NULL;
 const ::google::protobuf::EnumDescriptor* ColumnUpdatability_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* CollateSupport_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* CorrelationNamesSupport_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* DateTimeLiteralsSupport_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* GroupBySupport_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* IdentifierCasing_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* NullCollation_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* OrderBySupport_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* OuterJoinSupport_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* SubQuerySupport_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* UnionSupport_descriptor_ = NULL;
 
 }  // namespace
 
@@ -599,7 +621,117 @@ void protobuf_AssignDesc_User_2eproto() {
       ::google::protobuf::DescriptorPool::generated_pool(),
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(CreatePreparedStatementResp));
-  RunQuery_descriptor_ = file->message_type(26);
+  GetServerMetaReq_descriptor_ = file->message_type(26);
+  static const int GetServerMetaReq_offsets_[1] = {
+  };
+  GetServerMetaReq_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      GetServerMetaReq_descriptor_,
+      GetServerMetaReq::default_instance_,
+      GetServerMetaReq_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(GetServerMetaReq, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(GetServerMetaReq, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(GetServerMetaReq));
+  ConvertSupport_descriptor_ = file->message_type(27);
+  static const int ConvertSupport_offsets_[2] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ConvertSupport, from_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ConvertSupport, to_),
+  };
+  ConvertSupport_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      ConvertSupport_descriptor_,
+      ConvertSupport::default_instance_,
+      ConvertSupport_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ConvertSupport, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ConvertSupport, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(ConvertSupport));
+  GetServerMetaResp_descriptor_ = file->message_type(28);
+  static const int GetServerMetaResp_offsets_[3] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(GetServerMetaResp, status_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(GetServerMetaResp, server_meta_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(GetServerMetaResp, error_),
+  };
+  GetServerMetaResp_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      GetServerMetaResp_descriptor_,
+      GetServerMetaResp::default_instance_,
+      GetServerMetaResp_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(GetServerMetaResp, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(GetServerMetaResp, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(GetServerMetaResp));
+  ServerMeta_descriptor_ = file->message_type(29);
+  static const int ServerMeta_offsets_[49] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, all_tables_selectable_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, blob_included_in_max_row_size_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, catalog_at_start_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, catalog_separator_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, catalog_term_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, collate_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, column_aliasing_supported_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, convert_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, correlation_names_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, date_time_functions_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, date_time_literals_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, group_by_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, identifier_casing_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, identifier_quote_string_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, like_escape_clause_supported_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_binary_literal_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_catalog_name_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_char_literal_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_column_name_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_columns_in_group_by_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_columns_in_order_by_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_columns_in_select_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_cursor_name_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_logical_lob_size_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_row_size_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_schema_name_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_statement_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_statements_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_table_name_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_tables_in_select_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, max_user_name_length_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, null_collation_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, null_plus_non_null_equals_null_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, numeric_functions_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, order_by_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, outer_join_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, quoted_identifier_casing_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, read_only_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, schema_term_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, search_escape_string_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, select_for_update_supported_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, special_characters_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, sql_keywords_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, string_functions_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, subquery_support_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, system_functions_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, table_term_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, transaction_supported_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, union_support_),
+  };
+  ServerMeta_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      ServerMeta_descriptor_,
+      ServerMeta::default_instance_,
+      ServerMeta_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(ServerMeta, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(ServerMeta));
+  RunQuery_descriptor_ = file->message_type(30);
   static const int RunQuery_offsets_[5] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RunQuery, results_mode_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(RunQuery, type_),
@@ -625,6 +757,16 @@ void protobuf_AssignDesc_User_2eproto() {
   RequestStatus_descriptor_ = file->enum_type(4);
   ColumnSearchability_descriptor_ = file->enum_type(5);
   ColumnUpdatability_descriptor_ = file->enum_type(6);
+  CollateSupport_descriptor_ = file->enum_type(7);
+  CorrelationNamesSupport_descriptor_ = file->enum_type(8);
+  DateTimeLiteralsSupport_descriptor_ = file->enum_type(9);
+  GroupBySupport_descriptor_ = file->enum_type(10);
+  IdentifierCasing_descriptor_ = file->enum_type(11);
+  NullCollation_descriptor_ = file->enum_type(12);
+  OrderBySupport_descriptor_ = file->enum_type(13);
+  OuterJoinSupport_descriptor_ = file->enum_type(14);
+  SubQuerySupport_descriptor_ = file->enum_type(15);
+  UnionSupport_descriptor_ = file->enum_type(16);
 }
 
 namespace {
@@ -690,6 +832,14 @@ void protobuf_RegisterTypes(const ::std::string&) {
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
     CreatePreparedStatementResp_descriptor_, &CreatePreparedStatementResp::default_instance());
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    GetServerMetaReq_descriptor_, &GetServerMetaReq::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    ConvertSupport_descriptor_, &ConvertSupport::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    GetServerMetaResp_descriptor_, &GetServerMetaResp::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    ServerMeta_descriptor_, &ServerMeta::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
     RunQuery_descriptor_, &RunQuery::default_instance());
 }
 
@@ -748,6 +898,14 @@ void protobuf_ShutdownFile_User_2eproto() {
   delete PreparedStatement_reflection_;
   delete CreatePreparedStatementResp::default_instance_;
   delete CreatePreparedStatementResp_reflection_;
+  delete GetServerMetaReq::default_instance_;
+  delete GetServerMetaReq_reflection_;
+  delete ConvertSupport::default_instance_;
+  delete ConvertSupport_reflection_;
+  delete GetServerMetaResp::default_instance_;
+  delete GetServerMetaResp_reflection_;
+  delete ServerMeta::default_instance_;
+  delete ServerMeta_reflection_;
   delete RunQuery::default_instance_;
   delete RunQuery_reflection_;
 }
@@ -759,148 +917,233 @@ void protobuf_AddDesc_User_2eproto() {
   GOOGLE_PROTOBUF_VERIFY_VERSION;
 
   ::exec::protobuf_AddDesc_SchemaDef_2eproto();
+  ::common::protobuf_AddDesc_Types_2eproto();
   ::exec::shared::protobuf_AddDesc_UserBitShared_2eproto();
   ::exec::bit::data::protobuf_AddDesc_BitData_2eproto();
   ::exec::bit::control::protobuf_AddDesc_BitControl_2eproto();
   ::exec::bit::protobuf_AddDesc_ExecutionProtos_2eproto();
   ::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
     "\n\nUser.proto\022\texec.user\032\017SchemaDef.proto"
-    "\032\023UserBitShared.proto\032\rBitData.proto\032\020Bi"
-    "tControl.proto\032\025ExecutionProtos.proto\"&\n"
-    "\010Property\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"9\n"
-    "\016UserProperties\022\'\n\nproperties\030\001 \003(\0132\023.ex"
-    "ec.user.Property\"\267\001\n\020RpcEndpointInfos\022\014\n"
-    "\004name\030\001 \001(\t\022\017\n\007version\030\002 \001(\t\022\024\n\014majorVer"
-    "sion\030\003 \001(\r\022\024\n\014minorVersion\030\004 \001(\r\022\024\n\014patc"
-    "hVersion\030\005 \001(\r\022\023\n\013application\030\006 \001(\t\022\023\n\013b"
-    "uildNumber\030\007 \001(\r\022\030\n\020versionQualifier\030\010 \001"
-    "(\t\"\375\002\n\022UserToBitHandshake\022.\n\007channel\030\001 \001"
-    "(\0162\027.exec.shared.RpcChannel:\004USER\022\031\n\021sup"
-    "port_listening\030\002 \001(\010\022\023\n\013rpc_version\030\003 \001("
-    "\005\0221\n\013credentials\030\004 \001(\0132\034.exec.shared.Use"
-    "rCredentials\022-\n\nproperties\030\005 \001(\0132\031.exec."
-    "user.UserProperties\022$\n\025support_complex_t"
-    "ypes\030\006 \001(\010:\005false\022\036\n\017support_timeout\030\007 \001"
-    "(\010:\005false\0221\n\014client_infos\030\010 \001(\0132\033.exec.u"
-    "ser.RpcEndpointInfos\022,\n\014sasl_support\030\t \001"
-    "(\0162\026.exec.user.SaslSupport\"S\n\016RequestRes"
-    "ults\022&\n\010query_id\030\001 \001(\0132\024.exec.shared.Que"
-    "ryId\022\031\n\021maximum_responses\030\002 \001(\005\"g\n\025GetQu"
-    "eryPlanFragments\022\r\n\005query\030\001 \002(\t\022$\n\004type\030"
-    "\002 \001(\0162\026.exec.shared.QueryType\022\031\n\nsplit_p"
-    "lan\030\003 \001(\010:\005false\"\316\001\n\022QueryPlanFragments\022"
-    "3\n\006status\030\001 \002(\0162#.exec.shared.QueryResul"
-    "t.QueryState\022&\n\010query_id\030\002 \001(\0132\024.exec.sh"
-    "ared.QueryId\0221\n\tfragments\030\003 \003(\0132\036.exec.b"
-    "it.control.PlanFragment\022(\n\005error\030\004 \001(\0132\031"
-    ".exec.shared.DrillPBError\"\200\002\n\022BitToUserH"
-    "andshake\022\023\n\013rpc_version\030\002 \001(\005\022*\n\006status\030"
-    "\003 \001(\0162\032.exec.user.HandshakeStatus\022\017\n\007err"
-    "orId\030\004 \001(\t\022\024\n\014errorMessage\030\005 \001(\t\0221\n\014serv"
-    "er_infos\030\006 \001(\0132\033.exec.user.RpcEndpointIn"
-    "fos\022 \n\030authenticationMechanisms\030\007 \003(\t\022-\n"
-    "\021supported_methods\030\010 \003(\0162\022.exec.user.Rpc"
-    "Type\"-\n\nLikeFilter\022\017\n\007pattern\030\001 \001(\t\022\016\n\006e"
-    "scape\030\002 \001(\t\"D\n\016GetCatalogsReq\0222\n\023catalog"
-    "_name_filter\030\001 \001(\0132\025.exec.user.LikeFilte"
-    "r\"M\n\017CatalogMetadata\022\024\n\014catalog_name\030\001 \001"
-    "(\t\022\023\n\013description\030\002 \001(\t\022\017\n\007connect\030\003 \001(\t"
-    "\"\223\001\n\017GetCatalogsResp\022(\n\006status\030\001 \001(\0162\030.e"
-    "xec.user.RequestStatus\022,\n\010catalogs\030\002 \003(\013"
-    "2\032.exec.user.CatalogMetadata\022(\n\005error\030\003 "
-    "\001(\0132\031.exec.shared.DrillPBError\"v\n\rGetSch"
-    "emasReq\0222\n\023catalog_name_filter\030\001 \001(\0132\025.e"
-    "xec.user.LikeFilter\0221\n\022schema_name_filte"
-    "r\030\002 \001(\0132\025.exec.user.LikeFilter\"i\n\016Schema"
-    "Metadata\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema"
-    "_name\030\002 \001(\t\022\r\n\005owner\030\003 \001(\t\022\014\n\004type\030\004 \001(\t"
-    "\022\017\n\007mutable\030\005 \001(\t\"\220\001\n\016GetSchemasResp\022(\n\006"
-    "status\030\001 \001(\0162\030.exec.user.RequestStatus\022*"
-    "\n\007schemas\030\002 \003(\0132\031.exec.user.SchemaMetada"
-    "ta\022(\n\005error\030\003 \001(\0132\031.exec.shared.DrillPBE"
-    "rror\"\302\001\n\014GetTablesReq\0222\n\023catalog_name_fi"
-    "lter\030\001 \001(\0132\025.exec.user.LikeFilter\0221\n\022sch"
-    "ema_name_filter\030\002 \001(\0132\025.exec.user.LikeFi"
-    "lter\0220\n\021table_name_filter\030\003 \001(\0132\025.exec.u"
-    "ser.LikeFilter\022\031\n\021table_type_filter\030\004 \003("
-    "\t\"\\\n\rTableMetadata\022\024\n\014catalog_name\030\001 \001(\t"
-    "\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntable_name\030\003 \001("
-    "\t\022\014\n\004type\030\004 \001(\t\"\215\001\n\rGetTablesResp\022(\n\006sta"
-    "tus\030\001 \001(\0162\030.exec.user.RequestStatus\022(\n\006t"
-    "ables\030\002 \003(\0132\030.exec.user.TableMetadata\022(\n"
-    "\005error\030\003 \001(\0132\031.exec.shared.DrillPBError\""
-    "\333\001\n\rGetColumnsReq\0222\n\023catalog_name_filter"
-    "\030\001 \001(\0132\025.exec.user.LikeFilter\0221\n\022schema_"
-    "name_filter\030\002 \001(\0132\025.exec.user.LikeFilter"
-    "\0220\n\021table_name_filter\030\003 \001(\0132\025.exec.user."
-    "LikeFilter\0221\n\022column_name_filter\030\004 \001(\0132\025"
-    ".exec.user.LikeFilter\"\251\003\n\016ColumnMetadata"
-    "\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema_name\030\002 "
-    "\001(\t\022\022\n\ntable_name\030\003 \001(\t\022\023\n\013column_name\030\004"
-    " \001(\t\022\030\n\020ordinal_position\030\005 \001(\005\022\025\n\rdefaul"
-    "t_value\030\006 \001(\t\022\023\n\013is_nullable\030\007 \001(\010\022\021\n\tda"
-    "ta_type\030\010 \001(\t\022\027\n\017char_max_length\030\t \001(\005\022\031"
-    "\n\021char_octet_length\030\n \001(\005\022\031\n\021numeric_pre"
-    "cision\030\013 \001(\005\022\037\n\027numeric_precision_radix\030"
-    "\014 \001(\005\022\025\n\rnumeric_scale\030\r \001(\005\022\033\n\023date_tim"
-    "e_precision\030\016 \001(\005\022\025\n\rinterval_type\030\017 \001(\t"
-    "\022\032\n\022interval_precision\030\020 \001(\005\022\023\n\013column_s"
-    "ize\030\021 \001(\005\"\220\001\n\016GetColumnsResp\022(\n\006status\030\001"
-    " \001(\0162\030.exec.user.RequestStatus\022*\n\007column"
-    "s\030\002 \003(\0132\031.exec.user.ColumnMetadata\022(\n\005er"
-    "ror\030\003 \001(\0132\031.exec.shared.DrillPBError\"/\n\032"
-    "CreatePreparedStatementReq\022\021\n\tsql_query\030"
-    "\001 \001(\t\"\326\003\n\024ResultColumnMetadata\022\024\n\014catalo"
-    "g_name\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\022\n\ntab"
-    "le_name\030\003 \001(\t\022\023\n\013column_name\030\004 \001(\t\022\r\n\005la"
-    "bel\030\005 \001(\t\022\021\n\tdata_type\030\006 \001(\t\022\023\n\013is_nulla"
-    "ble\030\007 \001(\010\022\021\n\tprecision\030\010 \001(\005\022\r\n\005scale\030\t "
-    "\001(\005\022\016\n\006signed\030\n \001(\010\022\024\n\014display_size\030\013 \001("
-    "\005\022\022\n\nis_aliased\030\014 \001(\010\0225\n\rsearchability\030\r"
-    " \001(\0162\036.exec.user.ColumnSearchability\0223\n\014"
-    "updatability\030\016 \001(\0162\035.exec.user.ColumnUpd"
-    "atability\022\026\n\016auto_increment\030\017 \001(\010\022\030\n\020cas"
-    "e_sensitivity\030\020 \001(\010\022\020\n\010sortable\030\021 \001(\010\022\022\n"
-    "\nclass_name\030\022 \001(\t\022\023\n\013is_currency\030\024 \001(\010\"."
-    "\n\027PreparedStatementHandle\022\023\n\013server_info"
-    "\030\001 \001(\014\"\200\001\n\021PreparedStatement\0220\n\007columns\030"
-    "\001 \003(\0132\037.exec.user.ResultColumnMetadata\0229"
-    "\n\rserver_handle\030\002 \001(\0132\".exec.user.Prepar"
-    "edStatementHandle\"\253\001\n\033CreatePreparedStat"
-    "ementResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Re"
-    "questStatus\0228\n\022prepared_statement\030\002 \001(\0132"
-    "\034.exec.user.PreparedStatement\022(\n\005error\030\003"
-    " \001(\0132\031.exec.shared.DrillPBError\"\353\001\n\010RunQ"
-    "uery\0221\n\014results_mode\030\001 \001(\0162\033.exec.user.Q"
-    "ueryResultsMode\022$\n\004type\030\002 \001(\0162\026.exec.sha"
-    "red.QueryType\022\014\n\004plan\030\003 \001(\t\0221\n\tfragments"
-    "\030\004 \003(\0132\036.exec.bit.control.PlanFragment\022E"
-    "\n\031prepared_statement_handle\030\005 \001(\0132\".exec"
-    ".user.PreparedStatementHandle*\332\003\n\007RpcTyp"
-    "e\022\r\n\tHANDSHAKE\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002\022\r"
-    "\n\tRUN_QUERY\020\003\022\020\n\014CANCEL_QUERY\020\004\022\023\n\017REQUE"
-    "ST_RESULTS\020\005\022\027\n\023RESUME_PAUSED_QUERY\020\013\022\034\n"
-    "\030GET_QUERY_PLAN_FRAGMENTS\020\014\022\020\n\014GET_CATAL"
-    "OGS\020\016\022\017\n\013GET_SCHEMAS\020\017\022\016\n\nGET_TABLES\020\020\022\017"
-    "\n\013GET_COLUMNS\020\021\022\035\n\031CREATE_PREPARED_STATE"
-    "MENT\020\026\022\016\n\nQUERY_DATA\020\006\022\020\n\014QUERY_HANDLE\020\007"
+    "\032\013Types.proto\032\023UserBitShared.proto\032\rBitD"
+    "ata.proto\032\020BitControl.proto\032\025ExecutionPr"
+    "otos.proto\"&\n\010Property\022\013\n\003key\030\001 \002(\t\022\r\n\005v"
+    "alue\030\002 \002(\t\"9\n\016UserProperties\022\'\n\nproperti"
+    "es\030\001 \003(\0132\023.exec.user.Property\"\267\001\n\020RpcEnd"
+    "pointInfos\022\014\n\004name\030\001 \001(\t\022\017\n\007version\030\002 \001("
+    "\t\022\024\n\014majorVersion\030\003 \001(\r\022\024\n\014minorVersion\030"
+    "\004 \001(\r\022\024\n\014patchVersion\030\005 \001(\r\022\023\n\013applicati"
+    "on\030\006 \001(\t\022\023\n\013buildNumber\030\007 \001(\r\022\030\n\020version"
+    "Qualifier\030\010 \001(\t\"\375\002\n\022UserToBitHandshake\022."
+    "\n\007channel\030\001 \001(\0162\027.exec.shared.RpcChannel"
+    ":\004USER\022\031\n\021support_listening\030\002 \001(\010\022\023\n\013rpc"
+    "_version\030\003 \001(\005\0221\n\013credentials\030\004 \001(\0132\034.ex"
+    "ec.shared.UserCredentials\022-\n\nproperties\030"
+    "\005 \001(\0132\031.exec.user.UserProperties\022$\n\025supp"
+    "ort_complex_types\030\006 \001(\010:\005false\022\036\n\017suppor"
+    "t_timeout\030\007 \001(\010:\005false\0221\n\014client_infos\030\010"
+    " \001(\0132\033.exec.user.RpcEndpointInfos\022,\n\014sas"
+    "l_support\030\t \001(\0162\026.exec.user.SaslSupport\""
+    "S\n\016RequestResults\022&\n\010query_id\030\001 \001(\0132\024.ex"
+    "ec.shared.QueryId\022\031\n\021maximum_responses\030\002"
+    " \001(\005\"g\n\025GetQueryPlanFragments\022\r\n\005query\030\001"
+    " \002(\t\022$\n\004type\030\002 \001(\0162\026.exec.shared.QueryTy"
+    "pe\022\031\n\nsplit_plan\030\003 \001(\010:\005false\"\316\001\n\022QueryP"
+    "lanFragments\0223\n\006status\030\001 \002(\0162#.exec.shar"
+    "ed.QueryResult.QueryState\022&\n\010query_id\030\002 "
+    "\001(\0132\024.exec.shared.QueryId\0221\n\tfragments\030\003"
+    " \003(\0132\036.exec.bit.control.PlanFragment\022(\n\005"
+    "error\030\004 \001(\0132\031.exec.shared.DrillPBError\"\200"
+    "\002\n\022BitToUserHandshake\022\023\n\013rpc_version\030\002 \001"
+    "(\005\022*\n\006status\030\003 \001(\0162\032.exec.user.Handshake"
+    "Status\022\017\n\007errorId\030\004 \001(\t\022\024\n\014errorMessage\030"
+    "\005 \001(\t\0221\n\014server_infos\030\006 \001(\0132\033.exec.user."
+    "RpcEndpointInfos\022 \n\030authenticationMechan"
+    "isms\030\007 \003(\t\022-\n\021supported_methods\030\010 \003(\0162\022."
+    "exec.user.RpcType\"-\n\nLikeFilter\022\017\n\007patte"
+    "rn\030\001 \001(\t\022\016\n\006escape\030\002 \001(\t\"D\n\016GetCatalogsR"
+    "eq\0222\n\023catalog_name_filter\030\001 \001(\0132\025.exec.u"
+    "ser.LikeFilter\"M\n\017CatalogMetadata\022\024\n\014cat"
+    "alog_name\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022\017\n\007"
+    "connect\030\003 \001(\t\"\223\001\n\017GetCatalogsResp\022(\n\006sta"
+    "tus\030\001 \001(\0162\030.exec.user.RequestStatus\022,\n\010c"
+    "atalogs\030\002 \003(\0132\032.exec.user.CatalogMetadat"
+    "a\022(\n\005error\030\003 \001(\0132\031.exec.shared.DrillPBEr"
+    "ror\"v\n\rGetSchemasReq\0222\n\023catalog_name_fil"
+    "ter\030\001 \001(\0132\025.exec.user.LikeFilter\0221\n\022sche"
+    "ma_name_filter\030\002 \001(\0132\025.exec.user.LikeFil"
+    "ter\"i\n\016SchemaMetadata\022\024\n\014catalog_name\030\001 "
+    "\001(\t\022\023\n\013schema_name\030\002 \001(\t\022\r\n\005owner\030\003 \001(\t\022"
+    "\014\n\004type\030\004 \001(\t\022\017\n\007mutable\030\005 \001(\t\"\220\001\n\016GetSc"
+    "hemasResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Re"
+    "questStatus\022*\n\007schemas\030\002 \003(\0132\031.exec.user"
+    ".SchemaMetadata\022(\n\005error\030\003 \001(\0132\031.exec.sh"
+    "ared.DrillPBError\"\302\001\n\014GetTablesReq\0222\n\023ca"
+    "talog_name_filter\030\001 \001(\0132\025.exec.user.Like"
+    "Filter\0221\n\022schema_name_filter\030\002 \001(\0132\025.exe"
+    "c.user.LikeFilter\0220\n\021table_name_filter\030\003"
+    " \001(\0132\025.exec.user.LikeFilter\022\031\n\021table_typ"
+    "e_filter\030\004 \003(\t\"\\\n\rTableMetadata\022\024\n\014catal"
+    "og_name\030\001 \001(\t\022\023\n\013schema_name\030\002 \001(\t\022\022\n\nta"
+    "ble_name\030\003 \001(\t\022\014\n\004type\030\004 \001(\t\"\215\001\n\rGetTabl"
+    "esResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Reque"
+    "stStatus\022(\n\006tables\030\002 \003(\0132\030.exec.user.Tab"
+    "leMetadata\022(\n\005error\030\003 \001(\0132\031.exec.shared."
+    "DrillPBError\"\333\001\n\rGetColumnsReq\0222\n\023catalo"
+    "g_name_filter\030\001 \001(\0132\025.exec.user.LikeFilt"
+    "er\0221\n\022schema_name_filter\030\002 \001(\0132\025.exec.us"
+    "er.LikeFilter\0220\n\021table_name_filter\030\003 \001(\013"
+    "2\025.exec.user.LikeFilter\0221\n\022column_name_f"
+    "ilter\030\004 \001(\0132\025.exec.user.LikeFilter\"\251\003\n\016C"
+    "olumnMetadata\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013s"
+    "chema_name\030\002 \001(\t\022\022\n\ntable_name\030\003 \001(\t\022\023\n\013"
+    "column_name\030\004 \001(\t\022\030\n\020ordinal_position\030\005 "
+    "\001(\005\022\025\n\rdefault_value\030\006 \001(\t\022\023\n\013is_nullabl"
+    "e\030\007 \001(\010\022\021\n\tdata_type\030\010 \001(\t\022\027\n\017char_max_l"
+    "ength\030\t \001(\005\022\031\n\021char_octet_length\030\n \001(\005\022\031"
+    "\n\021numeric_precision\030\013 \001(\005\022\037\n\027numeric_pre"
+    "cision_radix\030\014 \001(\005\022\025\n\rnumeric_scale\030\r \001("
+    "\005\022\033\n\023date_time_precision\030\016 \001(\005\022\025\n\rinterv"
+    "al_type\030\017 \001(\t\022\032\n\022interval_precision\030\020 \001("
+    "\005\022\023\n\013column_size\030\021 \001(\005\"\220\001\n\016GetColumnsRes"
+    "p\022(\n\006status\030\001 \001(\0162\030.exec.user.RequestSta"
+    "tus\022*\n\007columns\030\002 \003(\0132\031.exec.user.ColumnM"
+    "etadata\022(\n\005error\030\003 \001(\0132\031.exec.shared.Dri"
+    "llPBError\"/\n\032CreatePreparedStatementReq\022"
+    "\021\n\tsql_query\030\001 \001(\t\"\326\003\n\024ResultColumnMetad"
+    "ata\022\024\n\014catalog_name\030\001 \001(\t\022\023\n\013schema_name"
+    "\030\002 \001(\t\022\022\n\ntable_name\030\003 \001(\t\022\023\n\013column_nam"
+    "e\030\004 \001(\t\022\r\n\005label\030\005 \001(\t\022\021\n\tdata_type\030\006 \001("
+    "\t\022\023\n\013is_nullable\030\007 \001(\010\022\021\n\tprecision\030\010 \001("
+    "\005\022\r\n\005scale\030\t \001(\005\022\016\n\006signed\030\n \001(\010\022\024\n\014disp"
+    "lay_size\030\013 \001(\005\022\022\n\nis_aliased\030\014 \001(\010\0225\n\rse"
+    "archability\030\r \001(\0162\036.exec.user.ColumnSear"
+    "chability\0223\n\014updatability\030\016 \001(\0162\035.exec.u"
+    "ser.ColumnUpdatability\022\026\n\016auto_increment"
+    "\030\017 \001(\010\022\030\n\020case_sensitivity\030\020 \001(\010\022\020\n\010sort"
+    "able\030\021 \001(\010\022\022\n\nclass_name\030\022 \001(\t\022\023\n\013is_cur"
+    "rency\030\024 \001(\010\".\n\027PreparedStatementHandle\022\023"
+    "\n\013server_info\030\001 \001(\014\"\200\001\n\021PreparedStatemen"
+    "t\0220\n\007columns\030\001 \003(\0132\037.exec.user.ResultCol"
+    "umnMetadata\0229\n\rserver_handle\030\002 \001(\0132\".exe"
+    "c.user.PreparedStatementHandle\"\253\001\n\033Creat"
+    "ePreparedStatementResp\022(\n\006status\030\001 \001(\0162\030"
+    ".exec.user.RequestStatus\0228\n\022prepared_sta"
+    "tement\030\002 \001(\0132\034.exec.user.PreparedStateme"
+    "nt\022(\n\005error\030\003 \001(\0132\031.exec.shared.DrillPBE"
+    "rror\"\022\n\020GetServerMetaReq\"P\n\016ConvertSuppo"
+    "rt\022\037\n\004from\030\001 \002(\0162\021.common.MinorType\022\035\n\002t"
+    "o\030\002 \002(\0162\021.common.MinorType\"\223\001\n\021GetServer"
+    "MetaResp\022(\n\006status\030\001 \001(\0162\030.exec.user.Req"
+    "uestStatus\022*\n\013server_meta\030\002 \001(\0132\025.exec.u"
+    "ser.ServerMeta\022(\n\005error\030\003 \001(\0132\031.exec.sha"
+    "red.DrillPBError\"\377\r\n\nServerMeta\022\035\n\025all_t"
+    "ables_selectable\030\001 \001(\010\022%\n\035blob_included_"
+    "in_max_row_size\030\002 \001(\010\022\030\n\020catalog_at_star"
+    "t\030\003 \001(\010\022\031\n\021catalog_separator\030\004 \001(\t\022\024\n\014ca"
+    "talog_term\030\005 \001(\t\0222\n\017collate_support\030\006 \003("
+    "\0162\031.exec.user.CollateSupport\022!\n\031column_a"
+    "liasing_supported\030\007 \001(\010\0222\n\017convert_suppo"
+    "rt\030\010 \003(\0132\031.exec.user.ConvertSupport\022E\n\031c"
+    "orrelation_names_support\030\t \001(\0162\".exec.us"
+    "er.CorrelationNamesSupport\022\033\n\023date_time_"
+    "functions\030\n \003(\t\022F\n\032date_time_literals_su"
+    "pport\030\013 \003(\0162\".exec.user.DateTimeLiterals"
+    "Support\0223\n\020group_by_support\030\014 \001(\0162\031.exec"
+    ".user.GroupBySupport\0226\n\021identifier_casin"
+    "g\030\r \001(\0162\033.exec.user.IdentifierCasing\022\037\n\027"
+    "identifier_quote_string\030\016 \001(\t\022$\n\034like_es"
+    "cape_clause_supported\030\017 \001(\010\022!\n\031max_binar"
+    "y_literal_length\030\020 \001(\r\022\037\n\027max_catalog_na"
+    "me_length\030\021 \001(\r\022\037\n\027max_char_literal_leng"
+    "th\030\022 \001(\r\022\036\n\026max_column_name_length\030\023 \001(\r"
+    "\022\037\n\027max_columns_in_group_by\030\024 \001(\r\022\037\n\027max"
+    "_columns_in_order_by\030\025 \001(\r\022\035\n\025max_column"
+    "s_in_select\030\026 \001(\r\022\036\n\026max_cursor_name_len"
+    "gth\030\027 \001(\r\022\034\n\024max_logical_lob_size\030\030 \001(\r\022"
+    "\024\n\014max_row_size\030\031 \001(\r\022\036\n\026max_schema_name"
+    "_length\030\032 \001(\r\022\034\n\024max_statement_length\030\033 "
+    "\001(\r\022\026\n\016max_statements\030\034 \001(\r\022\035\n\025max_table"
+    "_name_length\030\035 \001(\r\022\034\n\024max_tables_in_sele"
+    "ct\030\036 \001(\r\022\034\n\024max_user_name_length\030\037 \001(\r\0220"
+    "\n\016null_collation\030  \001(\0162\030.exec.user.NullC"
+    "ollation\022&\n\036null_plus_non_null_equals_nu"
+    "ll\030! \001(\010\022\031\n\021numeric_functions\030\" \003(\t\0223\n\020o"
+    "rder_by_support\030# \003(\0162\031.exec.user.OrderB"
+    "ySupport\0227\n\022outer_join_support\030$ \003(\0162\033.e"
+    "xec.user.OuterJoinSupport\022=\n\030quoted_iden"
+    "tifier_casing\030% \001(\0162\033.exec.user.Identifi"
+    "erCasing\022\021\n\tread_only\030& \001(\010\022\023\n\013schema_te"
+    "rm\030\' \001(\t\022\034\n\024search_escape_string\030( \001(\t\022#"
+    "\n\033select_for_update_supported\030) \001(\010\022\032\n\022s"
+    "pecial_characters\030* \001(\t\022\024\n\014sql_keywords\030"
+    "+ \003(\t\022\030\n\020string_functions\030, \003(\t\0224\n\020subqu"
+    "ery_support\030- \003(\0162\032.exec.user.SubQuerySu"
+    "pport\022\030\n\020system_functions\030. \003(\t\022\022\n\ntable"
+    "_term\030/ \001(\t\022\035\n\025transaction_supported\0300 \001"
+    "(\010\022.\n\runion_support\0301 \003(\0162\027.exec.user.Un"
+    "ionSupport\"\353\001\n\010RunQuery\0221\n\014results_mode\030"
+    "\001 \001(\0162\033.exec.user.QueryResultsMode\022$\n\004ty"
+    "pe\030\002 \001(\0162\026.exec.shared.QueryType\022\014\n\004plan"
+    "\030\003 \001(\t\0221\n\tfragments\030\004 \003(\0132\036.exec.bit.con"
+    "trol.PlanFragment\022E\n\031prepared_statement_"
+    "handle\030\005 \001(\0132\".exec.user.PreparedStateme"
+    "ntHandle*\320\003\n\007RpcType\022\r\n\tHANDSHAKE\020\000\022\007\n\003A"
+    "CK\020\001\022\013\n\007GOODBYE\020\002\022\r\n\tRUN_QUERY\020\003\022\020\n\014CANC"
+    "EL_QUERY\020\004\022\023\n\017REQUEST_RESULTS\020\005\022\027\n\023RESUM"
+    "E_PAUSED_QUERY\020\013\022\034\n\030GET_QUERY_PLAN_FRAGM"
+    "ENTS\020\014\022\020\n\014GET_CATALOGS\020\016\022\017\n\013GET_SCHEMAS\020"
+    "\017\022\016\n\nGET_TABLES\020\020\022\017\n\013GET_COLUMNS\020\021\022\035\n\031CR"
+    "EATE_PREPARED_STATEMENT\020\026\022\023\n\017GET_SERVER_"
+    "META\020\010\022\016\n\nQUERY_DATA\020\006\022\020\n\014QUERY_HANDLE\020\007"
     "\022\030\n\024QUERY_PLAN_FRAGMENTS\020\r\022\014\n\010CATALOGS\020\022"
     "\022\013\n\007SCHEMAS\020\023\022\n\n\006TABLES\020\024\022\013\n\007COLUMNS\020\025\022\026"
-    "\n\022PREPARED_STATEMENT\020\027\022\026\n\022REQ_META_FUNCT"
-    "IONS\020\010\022\026\n\022RESP_FUNCTION_LIST\020\t\022\020\n\014QUERY_"
-    "RESULT\020\n\022\020\n\014SASL_MESSAGE\020\030*6\n\013SaslSuppor"
-    "t\022\030\n\024UNKNOWN_SASL_SUPPORT\020\000\022\r\n\tSASL_AUTH"
-    "\020\001*#\n\020QueryResultsMode\022\017\n\013STREAM_FULL\020\001*"
-    "q\n\017HandshakeStatus\022\013\n\007SUCCESS\020\001\022\030\n\024RPC_V"
-    "ERSION_MISMATCH\020\002\022\017\n\013AUTH_FAILED\020\003\022\023\n\017UN"
-    "KNOWN_FAILURE\020\004\022\021\n\rAUTH_REQUIRED\020\005*D\n\rRe"
-    "questStatus\022\022\n\016UNKNOWN_STATUS\020\000\022\006\n\002OK\020\001\022"
-    "\n\n\006FAILED\020\002\022\013\n\007TIMEOUT\020\003*Y\n\023ColumnSearch"
-    "ability\022\031\n\025UNKNOWN_SEARCHABILITY\020\000\022\010\n\004NO"
-    "NE\020\001\022\010\n\004CHAR\020\002\022\n\n\006NUMBER\020\003\022\007\n\003ALL\020\004*K\n\022C"
-    "olumnUpdatability\022\030\n\024UNKNOWN_UPDATABILIT"
-    "Y\020\000\022\r\n\tREAD_ONLY\020\001\022\014\n\010WRITABLE\020\002B+\n\033org."
-    "apache.drill.exec.protoB\nUserProtosH\001", 5477);
+    "\n\022PREPARED_STATEMENT\020\027\022\017\n\013SERVER_META\020\t\022"
+    "\020\n\014QUERY_RESULT\020\n\022\020\n\014SASL_MESSAGE\020\030*6\n\013S"
+    "aslSupport\022\030\n\024UNKNOWN_SASL_SUPPORT\020\000\022\r\n\t"
+    "SASL_AUTH\020\001*#\n\020QueryResultsMode\022\017\n\013STREA"
+    "M_FULL\020\001*q\n\017HandshakeStatus\022\013\n\007SUCCESS\020\001"
+    "\022\030\n\024RPC_VERSION_MISMATCH\020\002\022\017\n\013AUTH_FAILE"
+    "D\020\003\022\023\n\017UNKNOWN_FAILURE\020\004\022\021\n\rAUTH_REQUIRE"
+    "D\020\005*D\n\rRequestStatus\022\022\n\016UNKNOWN_STATUS\020\000"
+    "\022\006\n\002OK\020\001\022\n\n\006FAILED\020\002\022\013\n\007TIMEOUT\020\003*Y\n\023Col"
+    "umnSearchability\022\031\n\025UNKNOWN_SEARCHABILIT"
+    "Y\020\000\022\010\n\004NONE\020\001\022\010\n\004CHAR\020\002\022\n\n\006NUMBER\020\003\022\007\n\003A"
+    "LL\020\004*K\n\022ColumnUpdatability\022\030\n\024UNKNOWN_UP"
+    "DATABILITY\020\000\022\r\n\tREAD_ONLY\020\001\022\014\n\010WRITABLE\020"
+    "\002*1\n\016CollateSupport\022\016\n\nCS_UNKNOWN\020\000\022\017\n\013C"
+    "S_GROUP_BY\020\001*J\n\027CorrelationNamesSupport\022"
+    "\013\n\007CN_NONE\020\001\022\026\n\022CN_DIFFERENT_NAMES\020\002\022\n\n\006"
+    "CN_ANY\020\003*\271\003\n\027DateTimeLiteralsSupport\022\016\n\n"
+    "DL_UNKNOWN\020\000\022\013\n\007DL_DATE\020\001\022\013\n\007DL_TIME\020\002\022\020"
+    "\n\014DL_TIMESTAMP\020\003\022\024\n\020DL_INTERVAL_YEAR\020\004\022\025"
+    "\n\021DL_INTERVAL_MONTH\020\005\022\023\n\017DL_INTERVAL_DAY"
+    "\020\006\022\024\n\020DL_INTERVAL_HOUR\020\007\022\026\n\022DL_INTERVAL_"
+    "MINUTE\020\010\022\026\n\022DL_INTERVAL_SECOND\020\t\022\035\n\031DL_I"
+    "NTERVAL_YEAR_TO_MONTH\020\n\022\033\n\027DL_INTERVAL_D"
+    "AY_TO_HOUR\020\013\022\035\n\031DL_INTERVAL_DAY_TO_MINUT"
+    "E\020\014\022\035\n\031DL_INTERVAL_DAY_TO_SECOND\020\r\022\036\n\032DL"
+    "_INTERVAL_HOUR_TO_MINUTE\020\016\022\036\n\032DL_INTERVA"
+    "L_HOUR_TO_SECOND\020\017\022 \n\034DL_INTERVAL_MINUTE"
+    "_TO_SECOND\020\020*Y\n\016GroupBySupport\022\013\n\007GB_NON"
+    "E\020\001\022\022\n\016GB_SELECT_ONLY\020\002\022\024\n\020GB_BEYOND_SEL"
+    "ECT\020\003\022\020\n\014GB_UNRELATED\020\004*x\n\020IdentifierCas"
+    "ing\022\016\n\nIC_UNKNOWN\020\000\022\023\n\017IC_STORES_LOWER\020\001"
+    "\022\023\n\017IC_STORES_MIXED\020\002\022\023\n\017IC_STORES_UPPER"
+    "\020\003\022\025\n\021IC_SUPPORTS_MIXED\020\004*X\n\rNullCollati"
+    "on\022\016\n\nNC_UNKNOWN\020\000\022\017\n\013NC_AT_START\020\001\022\r\n\tN"
+    "C_AT_END\020\002\022\013\n\007NC_HIGH\020\003\022\n\n\006NC_LOW\020\004*E\n\016O"
+    "rderBySupport\022\016\n\nOB_UNKNOWN\020\000\022\020\n\014OB_UNRE"
+    "LATED\020\001\022\021\n\rOB_EXPRESSION\020\002*\226\001\n\020OuterJoin"
+    "Support\022\016\n\nOJ_UNKNOWN\020\000\022\013\n\007OJ_LEFT\020\001\022\014\n\010"
+    "OJ_RIGHT\020\002\022\013\n\007OJ_FULL\020\003\022\r\n\tOJ_NESTED\020\004\022\022"
+    "\n\016OJ_NOT_ORDERED\020\005\022\014\n\010OJ_INNER\020\006\022\031\n\025OJ_A"
+    "LL_COMPARISON_OPS\020\007*\204\001\n\017SubQuerySupport\022"
+    "\016\n\nSQ_UNKNOWN\020\000\022\021\n\rSQ_CORRELATED\020\001\022\024\n\020SQ"
+    "_IN_COMPARISON\020\002\022\020\n\014SQ_IN_EXISTS\020\003\022\020\n\014SQ"
+    "_IN_INSERT\020\004\022\024\n\020SQ_IN_QUANTIFIED\020\005*;\n\014Un"
+    "ionSupport\022\r\n\tU_UNKNOWN\020\000\022\013\n\007U_UNION\020\001\022\017"
+    "\n\013U_UNION_ALL\020\002B+\n\033org.apache.drill.exec"
+    ".protoB\nUserProtosH\001", 8820);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "User.proto", &protobuf_RegisterTypes);
   Property::default_instance_ = new Property();
@@ -929,6 +1172,10 @@ void protobuf_AddDesc_User_2eproto() {
   PreparedStatementHandle::default_instance_ = new PreparedStatementHandle();
   PreparedStatement::default_instance_ = new PreparedStatement();
   CreatePreparedStatementResp::default_instance_ = new CreatePreparedStatementResp();
+  GetServerMetaReq::default_instance_ = new GetServerMetaReq();
+  ConvertSupport::default_instance_ = new ConvertSupport();
+  GetServerMetaResp::default_instance_ = new GetServerMetaResp();
+  ServerMeta::default_instance_ = new ServerMeta();
   RunQuery::default_instance_ = new RunQuery();
   Property::default_instance_->InitAsDefaultInstance();
   UserProperties::default_instance_->InitAsDefaultInstance();
@@ -956,6 +1203,10 @@ void protobuf_AddDesc_User_2eproto() {
   PreparedStatementHandle::default_instance_->InitAsDefaultInstance();
   PreparedStatement::default_instance_->InitAsDefaultInstance();
   CreatePreparedStatementResp::default_instance_->InitAsDefaultInstance();
+  GetServerMetaReq::default_instance_->InitAsDefaultInstance();
+  ConvertSupport::default_instance_->InitAsDefaultInstance();
+  GetServerMetaResp::default_instance_->InitAsDefaultInstance();
+  ServerMeta::default_instance_->InitAsDefaultInstance();
   RunQuery::default_instance_->InitAsDefaultInstance();
   ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_User_2eproto);
 }
@@ -1095,6 +1346,182 @@ bool ColumnUpdatability_IsValid(int value) {
   }
 }
 
+const ::google::protobuf::EnumDescriptor* CollateSupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return CollateSupport_descriptor_;
+}
+bool CollateSupport_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* CorrelationNamesSupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return CorrelationNamesSupport_descriptor_;
+}
+bool CorrelationNamesSupport_IsValid(int value) {
+  switch(value) {
+    case 1:
+    case 2:
+    case 3:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* DateTimeLiteralsSupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return DateTimeLiteralsSupport_descriptor_;
+}
+bool DateTimeLiteralsSupport_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+    case 7:
+    case 8:
+    case 9:
+    case 10:
+    case 11:
+    case 12:
+    case 13:
+    case 14:
+    case 15:
+    case 16:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* GroupBySupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return GroupBySupport_descriptor_;
+}
+bool GroupBySupport_IsValid(int value) {
+  switch(value) {
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* IdentifierCasing_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return IdentifierCasing_descriptor_;
+}
+bool IdentifierCasing_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* NullCollation_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return NullCollation_descriptor_;
+}
+bool NullCollation_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* OrderBySupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return OrderBySupport_descriptor_;
+}
+bool OrderBySupport_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* OuterJoinSupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return OuterJoinSupport_descriptor_;
+}
+bool OuterJoinSupport_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+    case 7:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* SubQuerySupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return SubQuerySupport_descriptor_;
+}
+bool SubQuerySupport_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* UnionSupport_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return UnionSupport_descriptor_;
+}
+bool UnionSupport_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+      return true;
+    default:
+      return false;
+  }
+}
+
 
 // ===================================================================
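Each of the *_IsValid functions above whitelists exactly the numeric values declared in User.proto and rejects everything else; during parsing, out-of-range values are routed to the message's unknown-field set rather than coerced into the enum. Client code reading these values from another source should apply the same guard. A rough Java-side equivalent, assuming the proto2-generated UserProtos bindings, where valueOf(int) returns null for unrecognized numbers:

    // Mirror of the C++ UnionSupport_IsValid switch: guard a raw wire value
    // before treating it as a UnionSupport constant.
    static UserProtos.UnionSupport decodeUnionSupport(int rawValue) {
      UserProtos.UnionSupport support = UserProtos.UnionSupport.valueOf(rawValue);
      // Fall back to the declared unknown value instead of propagating null.
      return support != null ? support : UserProtos.UnionSupport.U_UNKNOWN;
    }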
 
@@ -10995,6 +11422,3153 @@ void CreatePreparedStatementResp::Swap(CreatePreparedStatementResp* other) {
 // ===================================================================
 
 #ifndef _MSC_VER
+#endif  // !_MSC_VER
+
+GetServerMetaReq::GetServerMetaReq()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void GetServerMetaReq::InitAsDefaultInstance() {
+}
+
+GetServerMetaReq::GetServerMetaReq(const GetServerMetaReq& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void GetServerMetaReq::SharedCtor() {
+  _cached_size_ = 0;
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+GetServerMetaReq::~GetServerMetaReq() {
+  SharedDtor();
+}
+
+void GetServerMetaReq::SharedDtor() {
+  if (this != default_instance_) {
+  }
+}
+
+void GetServerMetaReq::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* GetServerMetaReq::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return GetServerMetaReq_descriptor_;
+}
+
+const GetServerMetaReq& GetServerMetaReq::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_User_2eproto();
+  return *default_instance_;
+}
+
+GetServerMetaReq* GetServerMetaReq::default_instance_ = NULL;
+
+GetServerMetaReq* GetServerMetaReq::New() const {
+  return new GetServerMetaReq;
+}
+
+void GetServerMetaReq::Clear() {
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool GetServerMetaReq::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+        ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+      return true;
+    }
+    DO_(::google::protobuf::internal::WireFormat::SkipField(
+          input, tag, mutable_unknown_fields()));
+  }
+  return true;
+#undef DO_
+}
+
+void GetServerMetaReq::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* GetServerMetaReq::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int GetServerMetaReq::ByteSize() const {
+  int total_size = 0;
+
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void GetServerMetaReq::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const GetServerMetaReq* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const GetServerMetaReq*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void GetServerMetaReq::MergeFrom(const GetServerMetaReq& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void GetServerMetaReq::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void GetServerMetaReq::CopyFrom(const GetServerMetaReq& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool GetServerMetaReq::IsInitialized() const {
+
+  return true;
+}
+
+void GetServerMetaReq::Swap(GetServerMetaReq* other) {
+  if (other != this) {
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata GetServerMetaReq::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = GetServerMetaReq_descriptor_;
+  metadata.reflection = GetServerMetaReq_reflection_;
+  return metadata;
+}
+
+
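+// GetServerMetaReq above carries no fields at all: MergePartialFromCodedStream
+// only needs to skip unknown fields and honor end-group tags, and serialization
+// emits nothing but preserved unknowns. On the caller side a single shared
+// instance suffices; for example, with the Java bindings (getDefaultInstance()
+// is the standard protoc-generated accessor):
+//
+//   UserProtos.GetServerMetaReq req = UserProtos.GetServerMetaReq.getDefaultInstance();
+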
+// ===================================================================
+
+#ifndef _MSC_VER
+const int ConvertSupport::kFromFieldNumber;
+const int ConvertSupport::kToFieldNumber;
+#endif  // !_MSC_VER
+
+ConvertSupport::ConvertSupport()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void ConvertSupport::InitAsDefaultInstance() {
+}
+
+ConvertSupport::ConvertSupport(const ConvertSupport& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void ConvertSupport::SharedCtor() {
+  _cached_size_ = 0;
+  from_ = 0;
+  to_ = 0;
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+ConvertSupport::~ConvertSupport() {
+  SharedDtor();
+}
+
+void ConvertSupport::SharedDtor() {
+  if (this != default_instance_) {
+  }
+}
+
+void ConvertSupport::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ConvertSupport::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return ConvertSupport_descriptor_;
+}
+
+const ConvertSupport& ConvertSupport::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_User_2eproto();
+  return *default_instance_;
+}
+
+ConvertSupport* ConvertSupport::default_instance_ = NULL;
+
+ConvertSupport* ConvertSupport::New() const {
+  return new ConvertSupport;
+}
+
+void ConvertSupport::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    from_ = 0;
+    to_ = 0;
+  }
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool ConvertSupport::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // required .common.MinorType from = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::common::MinorType_IsValid(value)) {
+            set_from(static_cast< ::common::MinorType >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(1, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(16)) goto parse_to;
+        break;
+      }
+
+      // required .common.MinorType to = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_to:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::common::MinorType_IsValid(value)) {
+            set_to(static_cast< ::common::MinorType >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(2, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectAtEnd()) return true;
+        break;
+      }
+
+      default: {
+      handle_uninterpreted:
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+          return true;
+        }
+        DO_(::google::protobuf::internal::WireFormat::SkipField(
+              input, tag, mutable_unknown_fields()));
+        break;
+      }
+    }
+  }
+  return true;
+#undef DO_
+}
+
+void ConvertSupport::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // required .common.MinorType from = 1;
+  if (has_from()) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      1, this->from(), output);
+  }
+
+  // required .common.MinorType to = 2;
+  if (has_to()) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      2, this->to(), output);
+  }
+
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* ConvertSupport::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  // required .common.MinorType from = 1;
+  if (has_from()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      1, this->from(), target);
+  }
+
+  // required .common.MinorType to = 2;
+  if (has_to()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      2, this->to(), target);
+  }
+
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int ConvertSupport::ByteSize() const {
+  int total_size = 0;
+
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    // required .common.MinorType from = 1;
+    if (has_from()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::EnumSize(this->from());
+    }
+
+    // required .common.MinorType to = 2;
+    if (has_to()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::EnumSize(this->to());
+    }
+
+  }
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void ConvertSupport::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const ConvertSupport* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const ConvertSupport*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void ConvertSupport::MergeFrom(const ConvertSupport& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    if (from.has_from()) {
+      set_from(from.from());
+    }
+    if (from.has_to()) {
+      set_to(from.to());
+    }
+  }
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void ConvertSupport::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void ConvertSupport::CopyFrom(const ConvertSupport& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool ConvertSupport::IsInitialized() const {
+  if ((_has_bits_[0] & 0x00000003) != 0x00000003) return false;
+
+  return true;
+}
+
+void ConvertSupport::Swap(ConvertSupport* other) {
+  if (other != this) {
+    std::swap(from_, other->from_);
+    std::swap(to_, other->to_);
+    std::swap(_has_bits_[0], other->_has_bits_[0]);
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata ConvertSupport::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = ConvertSupport_descriptor_;
+  metadata.reflection = ConvertSupport_reflection_;
+  return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int GetServerMetaResp::kStatusFieldNumber;
+const int GetServerMetaResp::kServerMetaFieldNumber;
+const int GetServerMetaResp::kErrorFieldNumber;
+#endif  // !_MSC_VER
+
+GetServerMetaResp::GetServerMetaResp()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void GetServerMetaResp::InitAsDefaultInstance() {
+  server_meta_ = const_cast< ::exec::user::ServerMeta*>(&::exec::user::ServerMeta::default_instance());
+  error_ = const_cast< ::exec::shared::DrillPBError*>(&::exec::shared::DrillPBError::default_instance());
+}
+
+GetServerMetaResp::GetServerMetaResp(const GetServerMetaResp& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void GetServerMetaResp::SharedCtor() {
+  _cached_size_ = 0;
+  status_ = 0;
+  server_meta_ = NULL;
+  error_ = NULL;
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+GetServerMetaResp::~GetServerMetaResp() {
+  SharedDtor();
+}
+
+void GetServerMetaResp::SharedDtor() {
+  if (this != default_instance_) {
+    delete server_meta_;
+    delete error_;
+  }
+}
+
+void GetServerMetaResp::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* GetServerMetaResp::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return GetServerMetaResp_descriptor_;
+}
+
+const GetServerMetaResp& GetServerMetaResp::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_User_2eproto();
+  return *default_instance_;
+}
+
+GetServerMetaResp* GetServerMetaResp::default_instance_ = NULL;
+
+GetServerMetaResp* GetServerMetaResp::New() const {
+  return new GetServerMetaResp;
+}
+
+void GetServerMetaResp::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    status_ = 0;
+    if (has_server_meta()) {
+      if (server_meta_ != NULL) server_meta_->::exec::user::ServerMeta::Clear();
+    }
+    if (has_error()) {
+      if (error_ != NULL) error_->::exec::shared::DrillPBError::Clear();
+    }
+  }
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool GetServerMetaResp::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // optional .exec.user.RequestStatus status = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::user::RequestStatus_IsValid(value)) {
+            set_status(static_cast< ::exec::user::RequestStatus >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(1, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(18)) goto parse_server_meta;
+        break;
+      }
+
+      // optional .exec.user.ServerMeta server_meta = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_server_meta:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+               input, mutable_server_meta()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(26)) goto parse_error;
+        break;
+      }
+
+      // optional .exec.shared.DrillPBError error = 3;
+      case 3: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_error:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+               input, mutable_error()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectAtEnd()) return true;
+        break;
+      }
+
+      default: {
+      handle_uninterpreted:
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+          return true;
+        }
+        DO_(::google::protobuf::internal::WireFormat::SkipField(
+              input, tag, mutable_unknown_fields()));
+        break;
+      }
+    }
+  }
+  return true;
+#undef DO_
+}
+
+void GetServerMetaResp::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // optional .exec.user.RequestStatus status = 1;
+  if (has_status()) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      1, this->status(), output);
+  }
+
+  // optional .exec.user.ServerMeta server_meta = 2;
+  if (has_server_meta()) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      2, this->server_meta(), output);
+  }
+
+  // optional .exec.shared.DrillPBError error = 3;
+  if (has_error()) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      3, this->error(), output);
+  }
+
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* GetServerMetaResp::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  // optional .exec.user.RequestStatus status = 1;
+  if (has_status()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      1, this->status(), target);
+  }
+
+  // optional .exec.user.ServerMeta server_meta = 2;
+  if (has_server_meta()) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        2, this->server_meta(), target);
+  }
+
+  // optional .exec.shared.DrillPBError error = 3;
+  if (has_error()) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        3, this->error(), target);
+  }
+
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int GetServerMetaResp::ByteSize() const {
+  int total_size = 0;
+
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    // optional .exec.user.RequestStatus status = 1;
+    if (has_status()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::EnumSize(this->status());
+    }
+
+    // optional .exec.user.ServerMeta server_meta = 2;
+    if (has_server_meta()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+          this->server_meta());
+    }
+
+    // optional .exec.shared.DrillPBError error = 3;
+    if (has_error()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+          this->error());
+    }
+
+  }
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void GetServerMetaResp::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const GetServerMetaResp* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const GetServerMetaResp*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void GetServerMetaResp::MergeFrom(const GetServerMetaResp& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    if (from.has_status()) {
+      set_status(from.status());
+    }
+    if (from.has_server_meta()) {
+      mutable_server_meta()->::exec::user::ServerMeta::MergeFrom(from.server_meta());
+    }
+    if (from.has_error()) {
+      mutable_error()->::exec::shared::DrillPBError::MergeFrom(from.error());
+    }
+  }
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void GetServerMetaResp::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void GetServerMetaResp::CopyFrom(const GetServerMetaResp& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool GetServerMetaResp::IsInitialized() const {
+
+  if (has_server_meta()) {
+    if (!this->server_meta().IsInitialized()) return false;
+  }
+  return true;
+}
+
+void GetServerMetaResp::Swap(GetServerMetaResp* other) {
+  if (other != this) {
+    std::swap(status_, other->status_);
+    std::swap(server_meta_, other->server_meta_);
+    std::swap(error_, other->error_);
+    std::swap(_has_bits_[0], other->_has_bits_[0]);
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata GetServerMetaResp::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = GetServerMetaResp_descriptor_;
+  metadata.reflection = GetServerMetaResp_reflection_;
+  return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int ServerMeta::kAllTablesSelectableFieldNumber;
+const int ServerMeta::kBlobIncludedInMaxRowSizeFieldNumber;
+const int ServerMeta::kCatalogAtStartFieldNumber;
+const int ServerMeta::kCatalogSeparatorFieldNumber;
+const int ServerMeta::kCatalogTermFieldNumber;
+const int ServerMeta::kCollateSupportFieldNumber;
+const int ServerMeta::kColumnAliasingSupportedFieldNumber;
+const int ServerMeta::kConvertSupportFieldNumber;
+const int ServerMeta::kCorrelationNamesSupportFieldNumber;
+const int ServerMeta::kDateTimeFunctionsFieldNumber;
+const int ServerMeta::kDateTimeLiteralsSupportFieldNumber;
+const int ServerMeta::kGroupBySupportFieldNumber;
+const int ServerMeta::kIdentifierCasingFieldNumber;
+const int ServerMeta::kIdentifierQuoteStringFieldNumber;
+const int ServerMeta::kLikeEscapeClauseSupportedFieldNumber;
+const int ServerMeta::kMaxBinaryLiteralLengthFieldNumber;
+const int ServerMeta::kMaxCatalogNameLengthFieldNumber;
+const int ServerMeta::kMaxCharLiteralLengthFieldNumber;
+const int ServerMeta::kMaxColumnNameLengthFieldNumber;
+const int ServerMeta::kMaxColumnsInGroupByFieldNumber;
+const int ServerMeta::kMaxColumnsInOrderByFieldNumber;
+const int ServerMeta::kMaxColumnsInSelectFieldNumber;
+const int ServerMeta::kMaxCursorNameLengthFieldNumber;
+const int ServerMeta::kMaxLogicalLobSizeFieldNumber;
+const int ServerMeta::kMaxRowSizeFieldNumber;
+const int ServerMeta::kMaxSchemaNameLengthFieldNumber;
+const int ServerMeta::kMaxStatementLengthFieldNumber;
+const int ServerMeta::kMaxStatementsFieldNumber;
+const int ServerMeta::kMaxTableNameLengthFieldNumber;
+const int ServerMeta::kMaxTablesInSelectFieldNumber;
+const int ServerMeta::kMaxUserNameLengthFieldNumber;
+const int ServerMeta::kNullCollationFieldNumber;
+const int ServerMeta::kNullPlusNonNullEqualsNullFieldNumber;
+const int ServerMeta::kNumericFunctionsFieldNumber;
+const int ServerMeta::kOrderBySupportFieldNumber;
+const int ServerMeta::kOuterJoinSupportFieldNumber;
+const int ServerMeta::kQuotedIdentifierCasingFieldNumber;
+const int ServerMeta::kReadOnlyFieldNumber;
+const int ServerMeta::kSchemaTermFieldNumber;
+const int ServerMeta::kSearchEscapeStringFieldNumber;
+const int ServerMeta::kSelectForUpdateSupportedFieldNumber;
+const int ServerMeta::kSpecialCharactersFieldNumber;
+const int ServerMeta::kSqlKeywordsFieldNumber;
+const int ServerMeta::kStringFunctionsFieldNumber;
+const int ServerMeta::kSubquerySupportFieldNumber;
+const int ServerMeta::kSystemFunctionsFieldNumber;
+const int ServerMeta::kTableTermFieldNumber;
+const int ServerMeta::kTransactionSupportedFieldNumber;
+const int ServerMeta::kUnionSupportFieldNumber;
+#endif  // !_MSC_VER
+
+ServerMeta::ServerMeta()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void ServerMeta::InitAsDefaultInstance() {
+}
+
+ServerMeta::ServerMeta(const ServerMeta& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void ServerMeta::SharedCtor() {
+  _cached_size_ = 0;
+  all_tables_selectable_ = false;
+  blob_included_in_max_row_size_ = false;
+  catalog_at_start_ = false;
+  catalog_separator_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  catalog_term_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  column_aliasing_supported_ = false;
+  correlation_names_support_ = 1;
+  group_by_support_ = 1;
+  identifier_casing_ = 0;
+  identifier_quote_string_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  like_escape_clause_supported_ = false;
+  max_binary_literal_length_ = 0u;
+  max_catalog_name_length_ = 0u;
+  max_char_literal_length_ = 0u;
+  max_column_name_length_ = 0u;
+  max_columns_in_group_by_ = 0u;
+  max_columns_in_order_by_ = 0u;
+  max_columns_in_select_ = 0u;
+  max_cursor_name_length_ = 0u;
+  max_logical_lob_size_ = 0u;
+  max_row_size_ = 0u;
+  max_schema_name_length_ = 0u;
+  max_statement_length_ = 0u;
+  max_statements_ = 0u;
+  max_table_name_length_ = 0u;
+  max_tables_in_select_ = 0u;
+  max_user_name_length_ = 0u;
+  null_collation_ = 0;
+  null_plus_non_null_equals_null_ = false;
+  quoted_identifier_casing_ = 0;
+  read_only_ = false;
+  schema_term_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  search_escape_string_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  select_for_update_supported_ = false;
+  special_characters_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  table_term_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  transaction_supported_ = false;
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+ServerMeta::~ServerMeta() {
+  SharedDtor();
+}
+
+void ServerMeta::SharedDtor() {
+  if (catalog_separator_ != &::google::protobuf::internal::kEmptyString) {
+    delete catalog_separator_;
+  }
+  if (catalog_term_ != &::google::protobuf::internal::kEmptyString) {
+    delete catalog_term_;
+  }
+  if (identifier_quote_string_ != &::google::protobuf::internal::kEmptyString) {
+    delete identifier_quote_string_;
+  }
+  if (schema_term_ != &::google::protobuf::internal::kEmptyString) {
+    delete schema_term_;
+  }
+  if (search_escape_string_ != &::google::protobuf::internal::kEmptyString) {
+    delete search_escape_string_;
+  }
+  if (special_characters_ != &::google::protobuf::internal::kEmptyString) {
+    delete special_characters_;
+  }
+  if (table_term_ != &::google::protobuf::internal::kEmptyString) {
+    delete table_term_;
+  }
+  if (this != default_instance_) {
+  }
+}
+
+void ServerMeta::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* ServerMeta::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return ServerMeta_descriptor_;
+}
+
+const ServerMeta& ServerMeta::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_User_2eproto();
+  return *default_instance_;
+}
+
+ServerMeta* ServerMeta::default_instance_ = NULL;
+
+ServerMeta* ServerMeta::New() const {
+  return new ServerMeta;
+}
+
+void ServerMeta::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    all_tables_selectable_ = false;
+    blob_included_in_max_row_size_ = false;
+    catalog_at_start_ = false;
+    if (has_catalog_separator()) {
+      if (catalog_separator_ != &::google::protobuf::internal::kEmptyString) {
+        catalog_separator_->clear();
+      }
+    }
+    if (has_catalog_term()) {
+      if (catalog_term_ != &::google::protobuf::internal::kEmptyString) {
+        catalog_term_->clear();
+      }
+    }
+    column_aliasing_supported_ = false;
+  }
+  if (_has_bits_[8 / 32] & (0xffu << (8 % 32))) {
+    correlation_names_support_ = 1;
+    group_by_support_ = 1;
+    identifier_casing_ = 0;
+    if (has_identifier_quote_string()) {
+      if (identifier_quote_string_ != &::google::protobuf::internal::kEmptyString) {
+        identifier_quote_string_->clear();
+      }
+    }
+    like_escape_clause_supported_ = false;
+    max_binary_literal_length_ = 0u;
+  }
+  if (_has_bits_[16 / 32] & (0xffu << (16 % 32))) {
+    max_catalog_name_length_ = 0u;
+    max_char_literal_length_ = 0u;
+    max_column_name_length_ = 0u;
+    max_columns_in_group_by_ = 0u;
+    max_columns_in_order_by_ = 0u;
+    max_columns_in_select_ = 0u;
+    max_cursor_name_length_ = 0u;
+    max_logical_lob_size_ = 0u;
+  }
+  if (_has_bits_[24 / 32] & (0xffu << (24 % 32))) {
+    max_row_size_ = 0u;
+    max_schema_name_length_ = 0u;
+    max_statement_length_ = 0u;
+    max_statements_ = 0u;
+    max_table_name_length_ = 0u;
+    max_tables_in_select_ = 0u;
+    max_user_name_length_ = 0u;
+    null_collation_ = 0;
+  }
+  if (_has_bits_[32 / 32] & (0xffu << (32 % 32))) {
+    null_plus_non_null_equals_null_ = false;
+    quoted_identifier_casing_ = 0;
+    read_only_ = false;
+    if (has_schema_term()) {
+      if (schema_term_ != &::google::protobuf::internal::kEmptyString) {
+        schema_term_->clear();
+      }
+    }
+    if (has_search_escape_string()) {
+      if (search_escape_string_ != &::google::protobuf::internal::kEmptyString) {
+        search_escape_string_->clear();
+      }
+    }
+  }
+  if (_has_bits_[40 / 32] & (0xffu << (40 % 32))) {
+    select_for_update_supported_ = false;
+    if (has_special_characters()) {
+      if (special_characters_ != &::google::protobuf::internal::kEmptyString) {
+        special_characters_->clear();
+      }
+    }
+    if (has_table_term()) {
+      if (table_term_ != &::google::protobuf::internal::kEmptyString) {
+        table_term_->clear();
+      }
+    }
+    transaction_supported_ = false;
+  }
+  collate_support_.Clear();
+  convert_support_.Clear();
+  date_time_functions_.Clear();
+  date_time_literals_support_.Clear();
+  numeric_functions_.Clear();
+  order_by_support_.Clear();
+  outer_join_support_.Clear();
+  sql_keywords_.Clear();
+  string_functions_.Clear();
+  subquery_support_.Clear();
+  system_functions_.Clear();
+  union_support_.Clear();
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool ServerMeta::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // optional bool all_tables_selectable = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>(
+                 input, &all_tables_selectable_)));
+          set_has_all_tables_selectable();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(16)) goto parse_blob_included_in_max_row_size;
+        break;
+      }
+
+      // optional bool blob_included_in_max_row_size = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_blob_included_in_max_row_size:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>(
+                 input, &blob_included_in_max_row_size_)));
+          set_has_blob_included_in_max_row_size();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(24)) goto parse_catalog_at_start;
+        break;
+      }
+
+      // optional bool catalog_at_start = 3;
+      case 3: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_catalog_at_start:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>(
+                 input, &catalog_at_start_)));
+          set_has_catalog_at_start();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(34)) goto parse_catalog_separator;
+        break;
+      }
+
+      // optional string catalog_separator = 4;
+      case 4: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_catalog_separator:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+                input, this->mutable_catalog_separator()));
+          ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+            this->catalog_separator().data(), this->catalog_separator().length(),
+            ::google::protobuf::internal::WireFormat::PARSE);
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(42)) goto parse_catalog_term;
+        break;
+      }
+
+      // optional string catalog_term = 5;
+      case 5: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_catalog_term:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+                input, this->mutable_catalog_term()));
+          ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+            this->catalog_term().data(), this->catalog_term().length(),
+            ::google::protobuf::internal::WireFormat::PARSE);
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(48)) goto parse_collate_support;
+        break;
+      }
+
+      // repeated .exec.user.CollateSupport collate_support = 6;
+      case 6: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_collate_support:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::user::CollateSupport_IsValid(value)) {
+            add_collate_support(static_cast< ::exec::user::CollateSupport >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(6, value);
+          }
+        } else if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag)
+                   == ::google::protobuf::internal::WireFormatLite::
+                      WIRETYPE_LENGTH_DELIMITED) {
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPackedEnumNoInline(
+                 input,
+                 &::exec::user::CollateSupport_IsValid,
+                 this->mutable_collate_support())));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(48)) goto parse_collate_support;
+        if (input->ExpectTag(56)) goto parse_column_aliasing_supported;
+        break;
+      }
+
+      // optional bool column_aliasing_supported = 7;
+      case 7: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_column_aliasing_supported:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>(
+                 input, &column_aliasing_supported_)));
+          set_has_column_aliasing_supported();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(66)) goto parse_convert_support;
+        break;
+      }
+
+      // repeated .exec.user.ConvertSupport convert_support = 8;
+      case 8: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_convert_support:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, add_convert_support()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(66)) goto parse_convert_support;
+        if (input->ExpectTag(72)) goto parse_correlation_names_support;
+        break;
+      }
+
+      // optional .exec.user.CorrelationNamesSupport correlation_names_support = 9;
+      case 9: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_correlation_names_support:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::user::CorrelationNamesSupport_IsValid(value)) {
+            set_correlation_names_support(static_cast< ::exec::user::CorrelationNamesSupport >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(9, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(82)) goto parse_date_time_functions;
+        break;
+      }
+
+      // repeated string date_time_functions = 10;
+      case 10: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_date_time_functions:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+                input, this->add_date_time_functions()));
+          ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+            this->date_time_functions(this->date_time_functions_size() - 1).data(),
+            this->date_time_functions(this->date_time_functions_size() - 1).length(),
+            ::google::protobuf::internal::WireFormat::PARSE);
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(82)) goto parse_date_time_functions;
+        if (input->ExpectTag(88)) goto parse_date_time_literals_support;
+        break;
+      }
+
+      // repeated .exec.user.DateTimeLiteralsSupport date_time_literals_support = 11;
+      case 11: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_date_time_literals_support:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::user::DateTimeLiteralsSupport_IsValid(value)) {
+            add_date_time_literals_support(static_cast< ::exec::user::DateTimeLiteralsSupport >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(11, value);
+          }
+        } else if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag)
+                   == ::google::protobuf::internal::WireFormatLite::
+                      WIRETYPE_LENGTH_DELIMITED) {
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPackedEnumNoInline(
+                 input,
+                 &::exec::user::DateTimeLiteralsSupport_IsValid,
+                 this->mutable_date_time_literals_support())));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(88)) goto parse_date_time_literals_support;
+        if (input->ExpectTag(96)) goto parse_group_by_support;
+        break;
+      }
+
+      // optional .exec.user.GroupBySupport group_by_support = 12;
+      case 12: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_group_by_support:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::user::GroupBySupport_IsValid(value)) {
+            set_group_by_support(static_cast< ::exec::user::GroupBySupport >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(12, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(104)) goto parse_identifier_casing;
+        break;
+      }
+
+      // optional .exec.user.IdentifierCasing identifier_casing = 13;
+      case 13: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_identifier_casing:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::user::IdentifierCasing_IsValid(value)) {
+            set_identifier_casing(static_cast< ::exec::user::IdentifierCasing >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(13, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(114)) goto parse_identifier_quote_string;
+        break;
+      }
+
+      // optional string identifier_quote_string = 14;
+      case 14: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_identifier_quote_string:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+                input, this->mutable_identifier_quote_string()));
+          ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+            this->identifier_quote_string().data(), this->identifier_quote_string().length(),
+            ::google::protobuf::internal::WireFormat::PARSE);
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(120)) goto parse_like_escape_clause_supported;
+        break;
+      }
+
+      // optional bool like_escape_clause_supported = 15;
+      case 15: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_like_escape_clause_supported:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   bool, ::google::protobuf::internal::WireFormatLite::TYPE_BOOL>(
+                 input, &like_escape_clause_supported_)));
+          set_has_like_escape_clause_supported();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(128)) goto parse_max_binary_literal_length;
+        break;
+      }
+
+      // optional uint32 max_binary_literal_length = 16;
+      case 16: {
+        if (::google::prot

<TRUNCATED>
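
One detail worth noting in the ConvertSupport implementation above: both from and to
are required fields (IsInitialized checks has-bits 0x00000003), so a message missing
either one will fail initialization and refuse to serialize. A minimal sketch of
building the same message through the Java classes Drill generates from User.proto;
the package and class names below are assumed from Drill's usual protobuf packaging
rather than taken from this commit:

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.proto.UserProtos.ConvertSupport;

public class ConvertSupportExample {
  public static void main(String[] args) throws Exception {
    // Both fields are required in proto2; leaving one unset makes build() throw.
    ConvertSupport cs = ConvertSupport.newBuilder()
        .setFrom(MinorType.INT)
        .setTo(MinorType.VARCHAR)
        .build();

    // Round-trip through the same wire format the C++ code above reads and writes.
    byte[] wire = cs.toByteArray();
    ConvertSupport parsed = ConvertSupport.parseFrom(wire);
    System.out.println(parsed.isInitialized());  // true: from and to are both set
  }
}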

[27/27] drill git commit: DRILL-5287: Provide option to skip updates of ephemeral state changes in Zookeeper

Posted by jn...@apache.org.
DRILL-5287: Provide option to skip updates of ephemeral state changes in Zookeeper

close #758


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7ebb985e
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7ebb985e
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7ebb985e

Branch: refs/heads/master
Commit: 7ebb985edc823692673a42276b4e2a80fd1f256c
Parents: 2b5a6f0
Author: Padma Penumarthy <pp...@yahoo.com>
Authored: Tue Feb 21 13:20:57 2017 -0800
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Thu Mar 2 10:50:24 2017 -0800

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |  4 ++++
 .../server/options/SystemOptionManager.java     |  3 ++-
 .../drill/exec/work/foreman/QueryManager.java   | 20 +++++++++++++++-----
 3 files changed, 21 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/7ebb985e/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 4f0f4d9..da3a312 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -441,4 +441,8 @@ public interface ExecConstants {
   String USE_DYNAMIC_UDFS_KEY = "exec.udf.use_dynamic";
   BooleanValidator USE_DYNAMIC_UDFS = new BooleanValidator(USE_DYNAMIC_UDFS_KEY, true);
 
+
+  String QUERY_TRANSIENT_STATE_UPDATE_KEY = "exec.query.progress.update";
+  BooleanValidator QUERY_TRANSIENT_STATE_UPDATE = new BooleanValidator(QUERY_TRANSIENT_STATE_UPDATE_KEY, true);
+
 }
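
The new exec.query.progress.update option above defaults to true, so every query
state transition keeps being published to the ZooKeeper transient store unless a
user opts out. A short sketch of disabling it from a JDBC client; the connection
URL is a placeholder, and this assumes the option is settable at session scope
like other options registered in SystemOptionManager:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DisableProgressUpdates {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper quorum in the URL; adjust for the actual cluster.
    try (Connection conn = DriverManager.getConnection("jdbc:drill:zk=localhost:2181");
         Statement stmt = conn.createStatement()) {
      // Queries on this session will skip ephemeral progress updates in ZooKeeper.
      stmt.execute("ALTER SESSION SET `exec.query.progress.update` = false");
    }
  }
}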

http://git-wip-us.apache.org/repos/asf/drill/blob/7ebb985e/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 4a846c0..fa73e06 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -166,7 +166,8 @@ public class SystemOptionManager extends BaseOptionManager implements AutoClosea
       ExecConstants.EXTERNAL_SORT_DISABLE_MANAGED_OPTION,
       ExecConstants.ENABLE_QUERY_PROFILE_VALIDATOR,
       ExecConstants.QUERY_PROFILE_DEBUG_VALIDATOR,
-      ExecConstants.USE_DYNAMIC_UDFS
+      ExecConstants.USE_DYNAMIC_UDFS,
+      ExecConstants.QUERY_TRANSIENT_STATE_UPDATE
     };
     final Map<String, OptionValidator> tmp = new HashMap<>();
     for (final OptionValidator validator : validators) {

http://git-wip-us.apache.org/repos/asf/drill/blob/7ebb985e/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
index c3bde6e..7305025 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
@@ -29,6 +29,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.exceptions.UserRemoteException;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.coord.ClusterCoordinator;
 import org.apache.drill.exec.coord.store.TransientStore;
 import org.apache.drill.exec.coord.store.TransientStoreConfig;
@@ -109,6 +110,9 @@ public class QueryManager implements AutoCloseable {
   // How many fragments have finished their execution.
   private final AtomicInteger finishedFragments = new AtomicInteger(0);
 
+  // Whether this query's profile is currently saved in the transient store.
+  private boolean inTransientStore;
+
   public QueryManager(final QueryId queryId, final RunQuery runQuery, final PersistentStoreProvider storeProvider,
       final ClusterCoordinator coordinator, final Foreman foreman) {
     this.queryId =  queryId;
@@ -282,13 +286,21 @@ public class QueryManager implements AutoCloseable {
     }
   }
 
-  QueryState updateEphemeralState(final QueryState queryState) {
-    switch (queryState) {
+  void updateEphemeralState(final QueryState queryState) {
+      // If the query is already in the ZooKeeper transient store, ignore the transient
+      // state update option; otherwise its profile would never be removed upon completion.
+      if (!inTransientStore &&
+          !foreman.getQueryContext().getOptions().getOption(ExecConstants.QUERY_TRANSIENT_STATE_UPDATE)) {
+        return;
+      }
+
+      switch (queryState) {
       case ENQUEUED:
       case STARTING:
       case RUNNING:
       case CANCELLATION_REQUESTED:
         transientProfiles.put(stringQueryId, getQueryInfo());  // store as ephemeral query profile.
+        inTransientStore = true;
         break;
 
       case COMPLETED:
@@ -296,17 +308,15 @@ public class QueryManager implements AutoCloseable {
       case FAILED:
         try {
           transientProfiles.remove(stringQueryId);
+          inTransientStore = false;
         } catch(final Exception e) {
           logger.warn("Failure while trying to delete the estore profile for this query.", e);
         }
-
         break;
 
       default:
         throw new IllegalStateException("unrecognized queryState " + queryState);
     }
-
-    return queryState;
   }
 
   void writeFinalProfile(UserException ex) {