You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@drill.apache.org by am...@apache.org on 2018/03/30 14:41:14 UTC

[1/7] drill git commit: DRILL-6300: Refresh protobuf C++ source files

Repository: drill
Updated Branches:
  refs/heads/master f1cfaaf3a -> a264e7feb


DRILL-6300: Refresh protobuf C++ source files

close apache/drill#1194


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/bfc86f17
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/bfc86f17
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/bfc86f17

Branch: refs/heads/master
Commit: bfc86f1719bc348a74886e8f29e49bed6fdfe8df
Parents: f1cfaaf
Author: Volodymyr Vysotskyi <vv...@gmail.com>
Authored: Thu Mar 29 18:51:08 2018 +0300
Committer: Aman Sinha <as...@maprtech.com>
Committed: Thu Mar 29 23:17:18 2018 -0700

----------------------------------------------------------------------
 contrib/native/client/readme.linux              |  12 +-
 .../client/src/protobuf/Coordination.pb.cc      |  98 +++++++-
 .../client/src/protobuf/Coordination.pb.h       |  89 ++++++-
 .../client/src/protobuf/UserBitShared.pb.cc     | 239 ++++++++++---------
 .../client/src/protobuf/UserBitShared.pb.h      |  24 +-
 5 files changed, 326 insertions(+), 136 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/bfc86f17/contrib/native/client/readme.linux
----------------------------------------------------------------------
diff --git a/contrib/native/client/readme.linux b/contrib/native/client/readme.linux
index 34c791b..92fe519 100644
--- a/contrib/native/client/readme.linux
+++ b/contrib/native/client/readme.linux
@@ -47,7 +47,7 @@ Install Prerequisites
     $> sudo yum install cppunit-devel
 
 3.2) Download Zookeeper from :
-    - http://apache.mirror.quintex.com/zookeeper/zookeeper-3.4.6/
+    - https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/
     - untar and then follow instructions in ZOOKEEPER_DIR/src/c/README to build and install the client libs
 
 3.3) run autoreconf
@@ -57,7 +57,7 @@ Install Prerequisites
     $> ./configure --enable-debug --with-syncapi --enable-static --enable-shared
     $> make && sudo make install
 
-4) Install boost. The minumim version required is 1.53, which will probably have to be built from source
+4) Install boost. The minimum version required is 1.53, which will probably have to be built from source
 
     # Remove any previous boost
     $> sudo yum -y erase boost
@@ -69,7 +69,9 @@ Install Prerequisites
     #install the binary rpms
     #(Note: the "rpm" utility does not clean up old versions very well.)
     $> sudo yum -y install ~/rpmbuild/RPMS/x86_64/*
-
+OR
+    # Build boost for Drill using instruction from readme.boost.
+    # Uncomment set(Boost_NAMESPACE drill_boost) line in client/CMakeLists.txt
 OR 
     Download and build using boost build. 
     See this link for how to build: http://www.boost.org/doc/libs/1_53_0/more/getting_started/unix-variants.html#prepare-to-use-a-boost-library-binary 
@@ -117,12 +119,12 @@ Build drill client
 Test
 ----
 Run query submitter from the command line
-    $> querySubmitter query='select * from dfs.`/Users/pchandra/work/data/tpc-h/customer.parquet`' type=sql connectStr=local=10.250.0.146:31010 api=async logLevel=trace user=yourUserName password=yourPassWord
+    $> querySubmitter query='SELECT * FROM cp.`employee.json` LIMIT 20' type=sql connectStr=local=10.250.0.146:31010 api=async logLevel=trace user=yourUserName password=yourPassWord
 
 Valgrind
 --------
 Examples to run Valgrind and see the log in Valkyrie
-    $> valgrind --leak-check=yes --xml=yes --xml-file=qs-vg-log-a.xml querySubmitter query='select LINEITEM from dfs.`/Users/pchandra/work/data/tpc-h/customer.parquet`' type=sql connectStr=local=10.250.0.146:31010 api=async logLevel=trace
+    $> valgrind --leak-check=yes --xml=yes --xml-file=qs-vg-log-a.xml querySubmitter query='SELECT * FROM cp.`employee.json` LIMIT 20' type=sql connectStr=local=10.250.0.146:31010 api=async logLevel=trace
     $> valkyrie -l qs-vg-log-a.xml
 
 

http://git-wip-us.apache.org/repos/asf/drill/blob/bfc86f17/contrib/native/client/src/protobuf/Coordination.pb.cc
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/Coordination.pb.cc b/contrib/native/client/src/protobuf/Coordination.pb.cc
index 923481b..14b3103 100644
--- a/contrib/native/client/src/protobuf/Coordination.pb.cc
+++ b/contrib/native/client/src/protobuf/Coordination.pb.cc
@@ -23,6 +23,7 @@ namespace {
 const ::google::protobuf::Descriptor* DrillbitEndpoint_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   DrillbitEndpoint_reflection_ = NULL;
+const ::google::protobuf::EnumDescriptor* DrillbitEndpoint_State_descriptor_ = NULL;
 const ::google::protobuf::Descriptor* DrillServiceInstance_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   DrillServiceInstance_reflection_ = NULL;
@@ -40,13 +41,14 @@ void protobuf_AssignDesc_Coordination_2eproto() {
       "Coordination.proto");
   GOOGLE_CHECK(file != NULL);
   DrillbitEndpoint_descriptor_ = file->message_type(0);
-  static const int DrillbitEndpoint_offsets_[6] = {
+  static const int DrillbitEndpoint_offsets_[7] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillbitEndpoint, address_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillbitEndpoint, user_port_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillbitEndpoint, control_port_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillbitEndpoint, data_port_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillbitEndpoint, roles_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillbitEndpoint, version_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillbitEndpoint, state_),
   };
   DrillbitEndpoint_reflection_ =
     new ::google::protobuf::internal::GeneratedMessageReflection(
@@ -59,6 +61,7 @@ void protobuf_AssignDesc_Coordination_2eproto() {
       ::google::protobuf::DescriptorPool::generated_pool(),
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(DrillbitEndpoint));
+  DrillbitEndpoint_State_descriptor_ = DrillbitEndpoint_descriptor_->enum_type(0);
   DrillServiceInstance_descriptor_ = file->message_type(1);
   static const int DrillServiceInstance_offsets_[3] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(DrillServiceInstance, id_),
@@ -133,19 +136,21 @@ void protobuf_AddDesc_Coordination_2eproto() {
   GOOGLE_PROTOBUF_VERIFY_VERSION;
 
   ::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
-    "\n\022Coordination.proto\022\004exec\"\214\001\n\020DrillbitE"
+    "\n\022Coordination.proto\022\004exec\"\367\001\n\020DrillbitE"
     "ndpoint\022\017\n\007address\030\001 \001(\t\022\021\n\tuser_port\030\002 "
     "\001(\005\022\024\n\014control_port\030\003 \001(\005\022\021\n\tdata_port\030\004"
     " \001(\005\022\032\n\005roles\030\005 \001(\0132\013.exec.Roles\022\017\n\007vers"
-    "ion\030\006 \001(\t\"i\n\024DrillServiceInstance\022\n\n\002id\030"
-    "\001 \001(\t\022\033\n\023registrationTimeUTC\030\002 \001(\003\022(\n\010en"
-    "dpoint\030\003 \001(\0132\026.exec.DrillbitEndpoint\"\227\001\n"
-    "\005Roles\022\027\n\tsql_query\030\001 \001(\010:\004true\022\032\n\014logic"
-    "al_plan\030\002 \001(\010:\004true\022\033\n\rphysical_plan\030\003 \001"
-    "(\010:\004true\022\033\n\rjava_executor\030\004 \001(\010:\004true\022\037\n"
-    "\021distributed_cache\030\005 \001(\010:\004trueB3\n\033org.ap"
-    "ache.drill.exec.protoB\022CoordinationProto"
-    "sH\001", 483);
+    "ion\030\006 \001(\t\022+\n\005state\030\007 \001(\0162\034.exec.Drillbit"
+    "Endpoint.State\"<\n\005State\022\013\n\007STARTUP\020\000\022\n\n\006"
+    "ONLINE\020\001\022\r\n\tQUIESCENT\020\002\022\013\n\007OFFLINE\020\003\"i\n\024"
+    "DrillServiceInstance\022\n\n\002id\030\001 \001(\t\022\033\n\023regi"
+    "strationTimeUTC\030\002 \001(\003\022(\n\010endpoint\030\003 \001(\0132"
+    "\026.exec.DrillbitEndpoint\"\227\001\n\005Roles\022\027\n\tsql"
+    "_query\030\001 \001(\010:\004true\022\032\n\014logical_plan\030\002 \001(\010"
+    ":\004true\022\033\n\rphysical_plan\030\003 \001(\010:\004true\022\033\n\rj"
+    "ava_executor\030\004 \001(\010:\004true\022\037\n\021distributed_"
+    "cache\030\005 \001(\010:\004trueB3\n\033org.apache.drill.ex"
+    "ec.protoB\022CoordinationProtosH\001", 590);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "Coordination.proto", &protobuf_RegisterTypes);
   DrillbitEndpoint::default_instance_ = new DrillbitEndpoint();
@@ -166,6 +171,31 @@ struct StaticDescriptorInitializer_Coordination_2eproto {
 
 // ===================================================================
 
+const ::google::protobuf::EnumDescriptor* DrillbitEndpoint_State_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return DrillbitEndpoint_State_descriptor_;
+}
+bool DrillbitEndpoint_State_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+      return true;
+    default:
+      return false;
+  }
+}
+
+#ifndef _MSC_VER
+const DrillbitEndpoint_State DrillbitEndpoint::STARTUP;
+const DrillbitEndpoint_State DrillbitEndpoint::ONLINE;
+const DrillbitEndpoint_State DrillbitEndpoint::QUIESCENT;
+const DrillbitEndpoint_State DrillbitEndpoint::OFFLINE;
+const DrillbitEndpoint_State DrillbitEndpoint::State_MIN;
+const DrillbitEndpoint_State DrillbitEndpoint::State_MAX;
+const int DrillbitEndpoint::State_ARRAYSIZE;
+#endif  // _MSC_VER
 #ifndef _MSC_VER
 const int DrillbitEndpoint::kAddressFieldNumber;
 const int DrillbitEndpoint::kUserPortFieldNumber;
@@ -173,6 +203,7 @@ const int DrillbitEndpoint::kControlPortFieldNumber;
 const int DrillbitEndpoint::kDataPortFieldNumber;
 const int DrillbitEndpoint::kRolesFieldNumber;
 const int DrillbitEndpoint::kVersionFieldNumber;
+const int DrillbitEndpoint::kStateFieldNumber;
 #endif  // !_MSC_VER
 
 DrillbitEndpoint::DrillbitEndpoint()
@@ -198,6 +229,7 @@ void DrillbitEndpoint::SharedCtor() {
   data_port_ = 0;
   roles_ = NULL;
   version_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  state_ = 0;
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
 }
 
@@ -256,6 +288,7 @@ void DrillbitEndpoint::Clear() {
         version_->clear();
       }
     }
+    state_ = 0;
   }
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
   mutable_unknown_fields()->Clear();
@@ -358,6 +391,27 @@ bool DrillbitEndpoint::MergePartialFromCodedStream(
         } else {
           goto handle_uninterpreted;
         }
+        if (input->ExpectTag(56)) goto parse_state;
+        break;
+      }
+
+      // optional .exec.DrillbitEndpoint.State state = 7;
+      case 7: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_state:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::DrillbitEndpoint_State_IsValid(value)) {
+            set_state(static_cast< ::exec::DrillbitEndpoint_State >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(7, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
         if (input->ExpectAtEnd()) return true;
         break;
       }
@@ -419,6 +473,12 @@ void DrillbitEndpoint::SerializeWithCachedSizes(
       6, this->version(), output);
   }
 
+  // optional .exec.DrillbitEndpoint.State state = 7;
+  if (has_state()) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      7, this->state(), output);
+  }
+
   if (!unknown_fields().empty()) {
     ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
         unknown_fields(), output);
@@ -469,6 +529,12 @@ void DrillbitEndpoint::SerializeWithCachedSizes(
         6, this->version(), target);
   }
 
+  // optional .exec.DrillbitEndpoint.State state = 7;
+  if (has_state()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      7, this->state(), target);
+  }
+
   if (!unknown_fields().empty()) {
     target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
         unknown_fields(), target);
@@ -522,6 +588,12 @@ int DrillbitEndpoint::ByteSize() const {
           this->version());
     }
 
+    // optional .exec.DrillbitEndpoint.State state = 7;
+    if (has_state()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::EnumSize(this->state());
+    }
+
   }
   if (!unknown_fields().empty()) {
     total_size +=
@@ -567,6 +639,9 @@ void DrillbitEndpoint::MergeFrom(const DrillbitEndpoint& from) {
     if (from.has_version()) {
       set_version(from.version());
     }
+    if (from.has_state()) {
+      set_state(from.state());
+    }
   }
   mutable_unknown_fields()->MergeFrom(from.unknown_fields());
 }
@@ -596,6 +671,7 @@ void DrillbitEndpoint::Swap(DrillbitEndpoint* other) {
     std::swap(data_port_, other->data_port_);
     std::swap(roles_, other->roles_);
     std::swap(version_, other->version_);
+    std::swap(state_, other->state_);
     std::swap(_has_bits_[0], other->_has_bits_[0]);
     _unknown_fields_.Swap(&other->_unknown_fields_);
     std::swap(_cached_size_, other->_cached_size_);

http://git-wip-us.apache.org/repos/asf/drill/blob/bfc86f17/contrib/native/client/src/protobuf/Coordination.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/Coordination.pb.h b/contrib/native/client/src/protobuf/Coordination.pb.h
index 14d6d28..e52c70c 100644
--- a/contrib/native/client/src/protobuf/Coordination.pb.h
+++ b/contrib/native/client/src/protobuf/Coordination.pb.h
@@ -23,6 +23,7 @@
 #include <google/protobuf/message.h>
 #include <google/protobuf/repeated_field.h>
 #include <google/protobuf/extension_set.h>
+#include <google/protobuf/generated_enum_reflection.h>
 #include <google/protobuf/unknown_field_set.h>
 // @@protoc_insertion_point(includes)
 
@@ -37,6 +38,27 @@ class DrillbitEndpoint;
 class DrillServiceInstance;
 class Roles;
 
+enum DrillbitEndpoint_State {
+  DrillbitEndpoint_State_STARTUP = 0,
+  DrillbitEndpoint_State_ONLINE = 1,
+  DrillbitEndpoint_State_QUIESCENT = 2,
+  DrillbitEndpoint_State_OFFLINE = 3
+};
+bool DrillbitEndpoint_State_IsValid(int value);
+const DrillbitEndpoint_State DrillbitEndpoint_State_State_MIN = DrillbitEndpoint_State_STARTUP;
+const DrillbitEndpoint_State DrillbitEndpoint_State_State_MAX = DrillbitEndpoint_State_OFFLINE;
+const int DrillbitEndpoint_State_State_ARRAYSIZE = DrillbitEndpoint_State_State_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* DrillbitEndpoint_State_descriptor();
+inline const ::std::string& DrillbitEndpoint_State_Name(DrillbitEndpoint_State value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    DrillbitEndpoint_State_descriptor(), value);
+}
+inline bool DrillbitEndpoint_State_Parse(
+    const ::std::string& name, DrillbitEndpoint_State* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<DrillbitEndpoint_State>(
+    DrillbitEndpoint_State_descriptor(), name, value);
+}
 // ===================================================================
 
 class DrillbitEndpoint : public ::google::protobuf::Message {
@@ -91,6 +113,32 @@ class DrillbitEndpoint : public ::google::protobuf::Message {
 
   // nested types ----------------------------------------------------
 
+  typedef DrillbitEndpoint_State State;
+  static const State STARTUP = DrillbitEndpoint_State_STARTUP;
+  static const State ONLINE = DrillbitEndpoint_State_ONLINE;
+  static const State QUIESCENT = DrillbitEndpoint_State_QUIESCENT;
+  static const State OFFLINE = DrillbitEndpoint_State_OFFLINE;
+  static inline bool State_IsValid(int value) {
+    return DrillbitEndpoint_State_IsValid(value);
+  }
+  static const State State_MIN =
+    DrillbitEndpoint_State_State_MIN;
+  static const State State_MAX =
+    DrillbitEndpoint_State_State_MAX;
+  static const int State_ARRAYSIZE =
+    DrillbitEndpoint_State_State_ARRAYSIZE;
+  static inline const ::google::protobuf::EnumDescriptor*
+  State_descriptor() {
+    return DrillbitEndpoint_State_descriptor();
+  }
+  static inline const ::std::string& State_Name(State value) {
+    return DrillbitEndpoint_State_Name(value);
+  }
+  static inline bool State_Parse(const ::std::string& name,
+      State* value) {
+    return DrillbitEndpoint_State_Parse(name, value);
+  }
+
   // accessors -------------------------------------------------------
 
   // optional string address = 1;
@@ -147,6 +195,13 @@ class DrillbitEndpoint : public ::google::protobuf::Message {
   inline ::std::string* release_version();
   inline void set_allocated_version(::std::string* version);
 
+  // optional .exec.DrillbitEndpoint.State state = 7;
+  inline bool has_state() const;
+  inline void clear_state();
+  static const int kStateFieldNumber = 7;
+  inline ::exec::DrillbitEndpoint_State state() const;
+  inline void set_state(::exec::DrillbitEndpoint_State value);
+
   // @@protoc_insertion_point(class_scope:exec.DrillbitEndpoint)
  private:
   inline void set_has_address();
@@ -161,6 +216,8 @@ class DrillbitEndpoint : public ::google::protobuf::Message {
   inline void clear_has_roles();
   inline void set_has_version();
   inline void clear_has_version();
+  inline void set_has_state();
+  inline void clear_has_state();
 
   ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
@@ -168,11 +225,12 @@ class DrillbitEndpoint : public ::google::protobuf::Message {
   ::google::protobuf::int32 user_port_;
   ::google::protobuf::int32 control_port_;
   ::exec::Roles* roles_;
-  ::std::string* version_;
   ::google::protobuf::int32 data_port_;
+  int state_;
+  ::std::string* version_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(6 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(7 + 31) / 32];
 
   friend void  protobuf_AddDesc_Coordination_2eproto();
   friend void protobuf_AssignDesc_Coordination_2eproto();
@@ -663,6 +721,29 @@ inline void DrillbitEndpoint::set_allocated_version(::std::string* version) {
   }
 }
 
+// optional .exec.DrillbitEndpoint.State state = 7;
+inline bool DrillbitEndpoint::has_state() const {
+  return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void DrillbitEndpoint::set_has_state() {
+  _has_bits_[0] |= 0x00000040u;
+}
+inline void DrillbitEndpoint::clear_has_state() {
+  _has_bits_[0] &= ~0x00000040u;
+}
+inline void DrillbitEndpoint::clear_state() {
+  state_ = 0;
+  clear_has_state();
+}
+inline ::exec::DrillbitEndpoint_State DrillbitEndpoint::state() const {
+  return static_cast< ::exec::DrillbitEndpoint_State >(state_);
+}
+inline void DrillbitEndpoint::set_state(::exec::DrillbitEndpoint_State value) {
+  assert(::exec::DrillbitEndpoint_State_IsValid(value));
+  set_has_state();
+  state_ = value;
+}
+
 // -------------------------------------------------------------------
 
 // DrillServiceInstance
@@ -920,6 +1001,10 @@ inline void Roles::set_distributed_cache(bool value) {
 namespace google {
 namespace protobuf {
 
+template <>
+inline const EnumDescriptor* GetEnumDescriptor< ::exec::DrillbitEndpoint_State>() {
+  return ::exec::DrillbitEndpoint_State_descriptor();
+}
 
 }  // namespace google
 }  // namespace protobuf

http://git-wip-us.apache.org/repos/asf/drill/blob/bfc86f17/contrib/native/client/src/protobuf/UserBitShared.pb.cc
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.cc b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
index 189eda2..4350082 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.cc
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
@@ -650,126 +650,129 @@ void protobuf_AddDesc_UserBitShared_2eproto() {
     "s.proto\032\022Coordination.proto\032\017SchemaDef.p"
     "roto\"$\n\017UserCredentials\022\021\n\tuser_name\030\001 \001"
     "(\t\"\'\n\007QueryId\022\r\n\005part1\030\001 \001(\020\022\r\n\005part2\030\002 "
-    "\001(\020\"\255\003\n\014DrillPBError\022\020\n\010error_id\030\001 \001(\t\022("
+    "\001(\020\"\355\003\n\014DrillPBError\022\020\n\010error_id\030\001 \001(\t\022("
     "\n\010endpoint\030\002 \001(\0132\026.exec.DrillbitEndpoint"
     "\0227\n\nerror_type\030\003 \001(\0162#.exec.shared.Drill"
     "PBError.ErrorType\022\017\n\007message\030\004 \001(\t\0220\n\tex"
     "ception\030\005 \001(\0132\035.exec.shared.ExceptionWra"
     "pper\0220\n\rparsing_error\030\006 \003(\0132\031.exec.share"
-    "d.ParsingError\"\262\001\n\tErrorType\022\016\n\nCONNECTI"
+    "d.ParsingError\"\362\001\n\tErrorType\022\016\n\nCONNECTI"
     "ON\020\000\022\r\n\tDATA_READ\020\001\022\016\n\nDATA_WRITE\020\002\022\014\n\010F"
     "UNCTION\020\003\022\t\n\005PARSE\020\004\022\016\n\nPERMISSION\020\005\022\010\n\004"
     "PLAN\020\006\022\014\n\010RESOURCE\020\007\022\n\n\006SYSTEM\020\010\022\031\n\025UNSU"
-    "PPORTED_OPERATION\020\t\022\016\n\nVALIDATION\020\n\"\246\001\n\020"
-    "ExceptionWrapper\022\027\n\017exception_class\030\001 \001("
-    "\t\022\017\n\007message\030\002 \001(\t\022:\n\013stack_trace\030\003 \003(\0132"
-    "%.exec.shared.StackTraceElementWrapper\022,"
-    "\n\005cause\030\004 \001(\0132\035.exec.shared.ExceptionWra"
-    "pper\"\205\001\n\030StackTraceElementWrapper\022\022\n\ncla"
-    "ss_name\030\001 \001(\t\022\021\n\tfile_name\030\002 \001(\t\022\023\n\013line"
-    "_number\030\003 \001(\005\022\023\n\013method_name\030\004 \001(\t\022\030\n\020is"
-    "_native_method\030\005 \001(\010\"\\\n\014ParsingError\022\024\n\014"
-    "start_column\030\002 \001(\005\022\021\n\tstart_row\030\003 \001(\005\022\022\n"
-    "\nend_column\030\004 \001(\005\022\017\n\007end_row\030\005 \001(\005\"~\n\016Re"
-    "cordBatchDef\022\024\n\014record_count\030\001 \001(\005\022+\n\005fi"
-    "eld\030\002 \003(\0132\034.exec.shared.SerializedField\022"
-    ")\n!carries_two_byte_selection_vector\030\003 \001"
-    "(\010\"\205\001\n\010NamePart\022(\n\004type\030\001 \001(\0162\032.exec.sha"
-    "red.NamePart.Type\022\014\n\004name\030\002 \001(\t\022$\n\005child"
-    "\030\003 \001(\0132\025.exec.shared.NamePart\"\033\n\004Type\022\010\n"
-    "\004NAME\020\000\022\t\n\005ARRAY\020\001\"\324\001\n\017SerializedField\022%"
-    "\n\nmajor_type\030\001 \001(\0132\021.common.MajorType\022(\n"
-    "\tname_part\030\002 \001(\0132\025.exec.shared.NamePart\022"
-    "+\n\005child\030\003 \003(\0132\034.exec.shared.SerializedF"
-    "ield\022\023\n\013value_count\030\004 \001(\005\022\027\n\017var_byte_le"
-    "ngth\030\005 \001(\005\022\025\n\rbuffer_length\030\007 \001(\005\"7\n\nNod"
-    "eStatus\022\017\n\007node_id\030\001 \001(\005\022\030\n\020memory_footp"
-    "rint\030\002 \001(\003\"\225\002\n\013QueryResult\0228\n\013query_stat"
-    "e\030\001 \001(\0162#.exec.shared.QueryResult.QueryS"
-    "tate\022&\n\010query_id\030\002 \001(\0132\024.exec.shared.Que"
-    "ryId\022(\n\005error\030\003 \003(\0132\031.exec.shared.DrillP"
-    "BError\"z\n\nQueryState\022\014\n\010STARTING\020\000\022\013\n\007RU"
-    "NNING\020\001\022\r\n\tCOMPLETED\020\002\022\014\n\010CANCELED\020\003\022\n\n\006"
-    "FAILED\020\004\022\032\n\026CANCELLATION_REQUESTED\020\005\022\014\n\010"
-    "ENQUEUED\020\006\"p\n\tQueryData\022&\n\010query_id\030\001 \001("
-    "\0132\024.exec.shared.QueryId\022\021\n\trow_count\030\002 \001"
-    "(\005\022(\n\003def\030\003 \001(\0132\033.exec.shared.RecordBatc"
-    "hDef\"\330\001\n\tQueryInfo\022\r\n\005query\030\001 \001(\t\022\r\n\005sta"
-    "rt\030\002 \001(\003\0222\n\005state\030\003 \001(\0162#.exec.shared.Qu"
-    "eryResult.QueryState\022\017\n\004user\030\004 \001(\t:\001-\022\'\n"
-    "\007foreman\030\005 \001(\0132\026.exec.DrillbitEndpoint\022\024"
-    "\n\014options_json\030\006 \001(\t\022\022\n\ntotal_cost\030\007 \001(\001"
-    "\022\025\n\nqueue_name\030\010 \001(\t:\001-\"\242\004\n\014QueryProfile"
-    "\022 \n\002id\030\001 \001(\0132\024.exec.shared.QueryId\022$\n\004ty"
-    "pe\030\002 \001(\0162\026.exec.shared.QueryType\022\r\n\005star"
-    "t\030\003 \001(\003\022\013\n\003end\030\004 \001(\003\022\r\n\005query\030\005 \001(\t\022\014\n\004p"
-    "lan\030\006 \001(\t\022\'\n\007foreman\030\007 \001(\0132\026.exec.Drillb"
-    "itEndpoint\0222\n\005state\030\010 \001(\0162#.exec.shared."
-    "QueryResult.QueryState\022\027\n\017total_fragment"
-    "s\030\t \001(\005\022\032\n\022finished_fragments\030\n \001(\005\022;\n\020f"
-    "ragment_profile\030\013 \003(\0132!.exec.shared.Majo"
-    "rFragmentProfile\022\017\n\004user\030\014 \001(\t:\001-\022\r\n\005err"
-    "or\030\r \001(\t\022\024\n\014verboseError\030\016 \001(\t\022\020\n\010error_"
-    "id\030\017 \001(\t\022\022\n\nerror_node\030\020 \001(\t\022\024\n\014options_"
-    "json\030\021 \001(\t\022\017\n\007planEnd\030\022 \001(\003\022\024\n\014queueWait"
-    "End\030\023 \001(\003\022\022\n\ntotal_cost\030\024 \001(\001\022\025\n\nqueue_n"
-    "ame\030\025 \001(\t:\001-\"t\n\024MajorFragmentProfile\022\031\n\021"
-    "major_fragment_id\030\001 \001(\005\022A\n\026minor_fragmen"
-    "t_profile\030\002 \003(\0132!.exec.shared.MinorFragm"
-    "entProfile\"\350\002\n\024MinorFragmentProfile\022)\n\005s"
-    "tate\030\001 \001(\0162\032.exec.shared.FragmentState\022("
-    "\n\005error\030\002 \001(\0132\031.exec.shared.DrillPBError"
-    "\022\031\n\021minor_fragment_id\030\003 \001(\005\0226\n\020operator_"
-    "profile\030\004 \003(\0132\034.exec.shared.OperatorProf"
-    "ile\022\022\n\nstart_time\030\005 \001(\003\022\020\n\010end_time\030\006 \001("
-    "\003\022\023\n\013memory_used\030\007 \001(\003\022\027\n\017max_memory_use"
-    "d\030\010 \001(\003\022(\n\010endpoint\030\t \001(\0132\026.exec.Drillbi"
-    "tEndpoint\022\023\n\013last_update\030\n \001(\003\022\025\n\rlast_p"
-    "rogress\030\013 \001(\003\"\377\001\n\017OperatorProfile\0221\n\rinp"
-    "ut_profile\030\001 \003(\0132\032.exec.shared.StreamPro"
-    "file\022\023\n\013operator_id\030\003 \001(\005\022\025\n\roperator_ty"
-    "pe\030\004 \001(\005\022\023\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess"
-    "_nanos\030\006 \001(\003\022#\n\033peak_local_memory_alloca"
-    "ted\030\007 \001(\003\022(\n\006metric\030\010 \003(\0132\030.exec.shared."
-    "MetricValue\022\022\n\nwait_nanos\030\t \001(\003\"B\n\rStrea"
-    "mProfile\022\017\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 \001"
-    "(\003\022\017\n\007schemas\030\003 \001(\003\"J\n\013MetricValue\022\021\n\tme"
-    "tric_id\030\001 \001(\005\022\022\n\nlong_value\030\002 \001(\003\022\024\n\014dou"
-    "ble_value\030\003 \001(\001\")\n\010Registry\022\035\n\003jar\030\001 \003(\013"
-    "2\020.exec.shared.Jar\"/\n\003Jar\022\014\n\004name\030\001 \001(\t\022"
-    "\032\n\022function_signature\030\002 \003(\t\"W\n\013SaslMessa"
-    "ge\022\021\n\tmechanism\030\001 \001(\t\022\014\n\004data\030\002 \001(\014\022\'\n\006s"
-    "tatus\030\003 \001(\0162\027.exec.shared.SaslStatus*5\n\n"
-    "RpcChannel\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020"
-    "\001\022\010\n\004USER\020\002*V\n\tQueryType\022\007\n\003SQL\020\001\022\013\n\007LOG"
-    "ICAL\020\002\022\014\n\010PHYSICAL\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022P"
-    "REPARED_STATEMENT\020\005*\207\001\n\rFragmentState\022\013\n"
-    "\007SENDING\020\000\022\027\n\023AWAITING_ALLOCATION\020\001\022\013\n\007R"
-    "UNNING\020\002\022\014\n\010FINISHED\020\003\022\r\n\tCANCELLED\020\004\022\n\n"
-    "\006FAILED\020\005\022\032\n\026CANCELLATION_REQUESTED\020\006*\360\005"
-    "\n\020CoreOperatorType\022\021\n\rSINGLE_SENDER\020\000\022\024\n"
-    "\020BROADCAST_SENDER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_"
-    "AGGREGATE\020\003\022\r\n\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN"
-    "\020\005\022\031\n\025HASH_PARTITION_SENDER\020\006\022\t\n\005LIMIT\020\007"
-    "\022\024\n\020MERGING_RECEIVER\020\010\022\034\n\030ORDERED_PARTIT"
-    "ION_SENDER\020\t\022\013\n\007PROJECT\020\n\022\026\n\022UNORDERED_R"
-    "ECEIVER\020\013\022\020\n\014RANGE_SENDER\020\014\022\n\n\006SCREEN\020\r\022"
-    "\034\n\030SELECTION_VECTOR_REMOVER\020\016\022\027\n\023STREAMI"
-    "NG_AGGREGATE\020\017\022\016\n\nTOP_N_SORT\020\020\022\021\n\rEXTERN"
-    "AL_SORT\020\021\022\t\n\005TRACE\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_S"
-    "ORT\020\024\022\032\n\026PARQUET_ROW_GROUP_SCAN\020\025\022\021\n\rHIV"
-    "E_SUB_SCAN\020\026\022\025\n\021SYSTEM_TABLE_SCAN\020\027\022\021\n\rM"
-    "OCK_SUB_SCAN\020\030\022\022\n\016PARQUET_WRITER\020\031\022\023\n\017DI"
-    "RECT_SUB_SCAN\020\032\022\017\n\013TEXT_WRITER\020\033\022\021\n\rTEXT"
-    "_SUB_SCAN\020\034\022\021\n\rJSON_SUB_SCAN\020\035\022\030\n\024INFO_S"
-    "CHEMA_SUB_SCAN\020\036\022\023\n\017COMPLEX_TO_JSON\020\037\022\025\n"
-    "\021PRODUCER_CONSUMER\020 \022\022\n\016HBASE_SUB_SCAN\020!"
-    "\022\n\n\006WINDOW\020\"\022\024\n\020NESTED_LOOP_JOIN\020#\022\021\n\rAV"
-    "RO_SUB_SCAN\020$\022\021\n\rPCAP_SUB_SCAN\020%*g\n\nSasl"
-    "Status\022\020\n\014SASL_UNKNOWN\020\000\022\016\n\nSASL_START\020\001"
-    "\022\024\n\020SASL_IN_PROGRESS\020\002\022\020\n\014SASL_SUCCESS\020\003"
-    "\022\017\n\013SASL_FAILED\020\004B.\n\033org.apache.drill.ex"
-    "ec.protoB\rUserBitSharedH\001", 4945);
+    "PPORTED_OPERATION\020\t\022\016\n\nVALIDATION\020\n\022\023\n\017E"
+    "XECUTION_ERROR\020\013\022\022\n\016INTERNAL_ERROR\020\014\022\025\n\021"
+    "UNSPECIFIED_ERROR\020\r\"\246\001\n\020ExceptionWrapper"
+    "\022\027\n\017exception_class\030\001 \001(\t\022\017\n\007message\030\002 \001"
+    "(\t\022:\n\013stack_trace\030\003 \003(\0132%.exec.shared.St"
+    "ackTraceElementWrapper\022,\n\005cause\030\004 \001(\0132\035."
+    "exec.shared.ExceptionWrapper\"\205\001\n\030StackTr"
+    "aceElementWrapper\022\022\n\nclass_name\030\001 \001(\t\022\021\n"
+    "\tfile_name\030\002 \001(\t\022\023\n\013line_number\030\003 \001(\005\022\023\n"
+    "\013method_name\030\004 \001(\t\022\030\n\020is_native_method\030\005"
+    " \001(\010\"\\\n\014ParsingError\022\024\n\014start_column\030\002 \001"
+    "(\005\022\021\n\tstart_row\030\003 \001(\005\022\022\n\nend_column\030\004 \001("
+    "\005\022\017\n\007end_row\030\005 \001(\005\"~\n\016RecordBatchDef\022\024\n\014"
+    "record_count\030\001 \001(\005\022+\n\005field\030\002 \003(\0132\034.exec"
+    ".shared.SerializedField\022)\n!carries_two_b"
+    "yte_selection_vector\030\003 \001(\010\"\205\001\n\010NamePart\022"
+    "(\n\004type\030\001 \001(\0162\032.exec.shared.NamePart.Typ"
+    "e\022\014\n\004name\030\002 \001(\t\022$\n\005child\030\003 \001(\0132\025.exec.sh"
+    "ared.NamePart\"\033\n\004Type\022\010\n\004NAME\020\000\022\t\n\005ARRAY"
+    "\020\001\"\324\001\n\017SerializedField\022%\n\nmajor_type\030\001 \001"
+    "(\0132\021.common.MajorType\022(\n\tname_part\030\002 \001(\013"
+    "2\025.exec.shared.NamePart\022+\n\005child\030\003 \003(\0132\034"
+    ".exec.shared.SerializedField\022\023\n\013value_co"
+    "unt\030\004 \001(\005\022\027\n\017var_byte_length\030\005 \001(\005\022\025\n\rbu"
+    "ffer_length\030\007 \001(\005\"7\n\nNodeStatus\022\017\n\007node_"
+    "id\030\001 \001(\005\022\030\n\020memory_footprint\030\002 \001(\003\"\263\002\n\013Q"
+    "ueryResult\0228\n\013query_state\030\001 \001(\0162#.exec.s"
+    "hared.QueryResult.QueryState\022&\n\010query_id"
+    "\030\002 \001(\0132\024.exec.shared.QueryId\022(\n\005error\030\003 "
+    "\003(\0132\031.exec.shared.DrillPBError\"\227\001\n\nQuery"
+    "State\022\014\n\010STARTING\020\000\022\013\n\007RUNNING\020\001\022\r\n\tCOMP"
+    "LETED\020\002\022\014\n\010CANCELED\020\003\022\n\n\006FAILED\020\004\022\032\n\026CAN"
+    "CELLATION_REQUESTED\020\005\022\014\n\010ENQUEUED\020\006\022\r\n\tP"
+    "REPARING\020\007\022\014\n\010PLANNING\020\010\"p\n\tQueryData\022&\n"
+    "\010query_id\030\001 \001(\0132\024.exec.shared.QueryId\022\021\n"
+    "\trow_count\030\002 \001(\005\022(\n\003def\030\003 \001(\0132\033.exec.sha"
+    "red.RecordBatchDef\"\330\001\n\tQueryInfo\022\r\n\005quer"
+    "y\030\001 \001(\t\022\r\n\005start\030\002 \001(\003\0222\n\005state\030\003 \001(\0162#."
+    "exec.shared.QueryResult.QueryState\022\017\n\004us"
+    "er\030\004 \001(\t:\001-\022\'\n\007foreman\030\005 \001(\0132\026.exec.Dril"
+    "lbitEndpoint\022\024\n\014options_json\030\006 \001(\t\022\022\n\nto"
+    "tal_cost\030\007 \001(\001\022\025\n\nqueue_name\030\010 \001(\t:\001-\"\242\004"
+    "\n\014QueryProfile\022 \n\002id\030\001 \001(\0132\024.exec.shared"
+    ".QueryId\022$\n\004type\030\002 \001(\0162\026.exec.shared.Que"
+    "ryType\022\r\n\005start\030\003 \001(\003\022\013\n\003end\030\004 \001(\003\022\r\n\005qu"
+    "ery\030\005 \001(\t\022\014\n\004plan\030\006 \001(\t\022\'\n\007foreman\030\007 \001(\013"
+    "2\026.exec.DrillbitEndpoint\0222\n\005state\030\010 \001(\0162"
+    "#.exec.shared.QueryResult.QueryState\022\027\n\017"
+    "total_fragments\030\t \001(\005\022\032\n\022finished_fragme"
+    "nts\030\n \001(\005\022;\n\020fragment_profile\030\013 \003(\0132!.ex"
+    "ec.shared.MajorFragmentProfile\022\017\n\004user\030\014"
+    " \001(\t:\001-\022\r\n\005error\030\r \001(\t\022\024\n\014verboseError\030\016"
+    " \001(\t\022\020\n\010error_id\030\017 \001(\t\022\022\n\nerror_node\030\020 \001"
+    "(\t\022\024\n\014options_json\030\021 \001(\t\022\017\n\007planEnd\030\022 \001("
+    "\003\022\024\n\014queueWaitEnd\030\023 \001(\003\022\022\n\ntotal_cost\030\024 "
+    "\001(\001\022\025\n\nqueue_name\030\025 \001(\t:\001-\"t\n\024MajorFragm"
+    "entProfile\022\031\n\021major_fragment_id\030\001 \001(\005\022A\n"
+    "\026minor_fragment_profile\030\002 \003(\0132!.exec.sha"
+    "red.MinorFragmentProfile\"\350\002\n\024MinorFragme"
+    "ntProfile\022)\n\005state\030\001 \001(\0162\032.exec.shared.F"
+    "ragmentState\022(\n\005error\030\002 \001(\0132\031.exec.share"
+    "d.DrillPBError\022\031\n\021minor_fragment_id\030\003 \001("
+    "\005\0226\n\020operator_profile\030\004 \003(\0132\034.exec.share"
+    "d.OperatorProfile\022\022\n\nstart_time\030\005 \001(\003\022\020\n"
+    "\010end_time\030\006 \001(\003\022\023\n\013memory_used\030\007 \001(\003\022\027\n\017"
+    "max_memory_used\030\010 \001(\003\022(\n\010endpoint\030\t \001(\0132"
+    "\026.exec.DrillbitEndpoint\022\023\n\013last_update\030\n"
+    " \001(\003\022\025\n\rlast_progress\030\013 \001(\003\"\377\001\n\017Operator"
+    "Profile\0221\n\rinput_profile\030\001 \003(\0132\032.exec.sh"
+    "ared.StreamProfile\022\023\n\013operator_id\030\003 \001(\005\022"
+    "\025\n\roperator_type\030\004 \001(\005\022\023\n\013setup_nanos\030\005 "
+    "\001(\003\022\025\n\rprocess_nanos\030\006 \001(\003\022#\n\033peak_local"
+    "_memory_allocated\030\007 \001(\003\022(\n\006metric\030\010 \003(\0132"
+    "\030.exec.shared.MetricValue\022\022\n\nwait_nanos\030"
+    "\t \001(\003\"B\n\rStreamProfile\022\017\n\007records\030\001 \001(\003\022"
+    "\017\n\007batches\030\002 \001(\003\022\017\n\007schemas\030\003 \001(\003\"J\n\013Met"
+    "ricValue\022\021\n\tmetric_id\030\001 \001(\005\022\022\n\nlong_valu"
+    "e\030\002 \001(\003\022\024\n\014double_value\030\003 \001(\001\")\n\010Registr"
+    "y\022\035\n\003jar\030\001 \003(\0132\020.exec.shared.Jar\"/\n\003Jar\022"
+    "\014\n\004name\030\001 \001(\t\022\032\n\022function_signature\030\002 \003("
+    "\t\"W\n\013SaslMessage\022\021\n\tmechanism\030\001 \001(\t\022\014\n\004d"
+    "ata\030\002 \001(\014\022\'\n\006status\030\003 \001(\0162\027.exec.shared."
+    "SaslStatus*5\n\nRpcChannel\022\017\n\013BIT_CONTROL\020"
+    "\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER\020\002*V\n\tQueryType\022\007"
+    "\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010PHYSICAL\020\003\022\r\n\tEX"
+    "ECUTION\020\004\022\026\n\022PREPARED_STATEMENT\020\005*\207\001\n\rFr"
+    "agmentState\022\013\n\007SENDING\020\000\022\027\n\023AWAITING_ALL"
+    "OCATION\020\001\022\013\n\007RUNNING\020\002\022\014\n\010FINISHED\020\003\022\r\n\t"
+    "CANCELLED\020\004\022\n\n\006FAILED\020\005\022\032\n\026CANCELLATION_"
+    "REQUESTED\020\006*\227\006\n\020CoreOperatorType\022\021\n\rSING"
+    "LE_SENDER\020\000\022\024\n\020BROADCAST_SENDER\020\001\022\n\n\006FIL"
+    "TER\020\002\022\022\n\016HASH_AGGREGATE\020\003\022\r\n\tHASH_JOIN\020\004"
+    "\022\016\n\nMERGE_JOIN\020\005\022\031\n\025HASH_PARTITION_SENDE"
+    "R\020\006\022\t\n\005LIMIT\020\007\022\024\n\020MERGING_RECEIVER\020\010\022\034\n\030"
+    "ORDERED_PARTITION_SENDER\020\t\022\013\n\007PROJECT\020\n\022"
+    "\026\n\022UNORDERED_RECEIVER\020\013\022\020\n\014RANGE_SENDER\020"
+    "\014\022\n\n\006SCREEN\020\r\022\034\n\030SELECTION_VECTOR_REMOVE"
+    "R\020\016\022\027\n\023STREAMING_AGGREGATE\020\017\022\016\n\nTOP_N_SO"
+    "RT\020\020\022\021\n\rEXTERNAL_SORT\020\021\022\t\n\005TRACE\020\022\022\t\n\005UN"
+    "ION\020\023\022\014\n\010OLD_SORT\020\024\022\032\n\026PARQUET_ROW_GROUP"
+    "_SCAN\020\025\022\021\n\rHIVE_SUB_SCAN\020\026\022\025\n\021SYSTEM_TAB"
+    "LE_SCAN\020\027\022\021\n\rMOCK_SUB_SCAN\020\030\022\022\n\016PARQUET_"
+    "WRITER\020\031\022\023\n\017DIRECT_SUB_SCAN\020\032\022\017\n\013TEXT_WR"
+    "ITER\020\033\022\021\n\rTEXT_SUB_SCAN\020\034\022\021\n\rJSON_SUB_SC"
+    "AN\020\035\022\030\n\024INFO_SCHEMA_SUB_SCAN\020\036\022\023\n\017COMPLE"
+    "X_TO_JSON\020\037\022\025\n\021PRODUCER_CONSUMER\020 \022\022\n\016HB"
+    "ASE_SUB_SCAN\020!\022\n\n\006WINDOW\020\"\022\024\n\020NESTED_LOO"
+    "P_JOIN\020#\022\021\n\rAVRO_SUB_SCAN\020$\022\021\n\rPCAP_SUB_"
+    "SCAN\020%\022\022\n\016KAFKA_SUB_SCAN\020&\022\021\n\rKUDU_SUB_S"
+    "CAN\020\'*g\n\nSaslStatus\022\020\n\014SASL_UNKNOWN\020\000\022\016\n"
+    "\nSASL_START\020\001\022\024\n\020SASL_IN_PROGRESS\020\002\022\020\n\014S"
+    "ASL_SUCCESS\020\003\022\017\n\013SASL_FAILED\020\004B.\n\033org.ap"
+    "ache.drill.exec.protoB\rUserBitSharedH\001", 5078);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "UserBitShared.proto", &protobuf_RegisterTypes);
   UserCredentials::default_instance_ = new UserCredentials();
@@ -928,6 +931,8 @@ bool CoreOperatorType_IsValid(int value) {
     case 35:
     case 36:
     case 37:
+    case 38:
+    case 39:
       return true;
     default:
       return false;
@@ -1440,6 +1445,9 @@ bool DrillPBError_ErrorType_IsValid(int value) {
     case 8:
     case 9:
     case 10:
+    case 11:
+    case 12:
+    case 13:
       return true;
     default:
       return false;
@@ -1458,6 +1466,9 @@ const DrillPBError_ErrorType DrillPBError::RESOURCE;
 const DrillPBError_ErrorType DrillPBError::SYSTEM;
 const DrillPBError_ErrorType DrillPBError::UNSUPPORTED_OPERATION;
 const DrillPBError_ErrorType DrillPBError::VALIDATION;
+const DrillPBError_ErrorType DrillPBError::EXECUTION_ERROR;
+const DrillPBError_ErrorType DrillPBError::INTERNAL_ERROR;
+const DrillPBError_ErrorType DrillPBError::UNSPECIFIED_ERROR;
 const DrillPBError_ErrorType DrillPBError::ErrorType_MIN;
 const DrillPBError_ErrorType DrillPBError::ErrorType_MAX;
 const int DrillPBError::ErrorType_ARRAYSIZE;
@@ -4334,6 +4345,8 @@ bool QueryResult_QueryState_IsValid(int value) {
     case 4:
     case 5:
     case 6:
+    case 7:
+    case 8:
       return true;
     default:
       return false;
@@ -4348,6 +4361,8 @@ const QueryResult_QueryState QueryResult::CANCELED;
 const QueryResult_QueryState QueryResult::FAILED;
 const QueryResult_QueryState QueryResult::CANCELLATION_REQUESTED;
 const QueryResult_QueryState QueryResult::ENQUEUED;
+const QueryResult_QueryState QueryResult::PREPARING;
+const QueryResult_QueryState QueryResult::PLANNING;
 const QueryResult_QueryState QueryResult::QueryState_MIN;
 const QueryResult_QueryState QueryResult::QueryState_MAX;
 const int QueryResult::QueryState_ARRAYSIZE;

http://git-wip-us.apache.org/repos/asf/drill/blob/bfc86f17/contrib/native/client/src/protobuf/UserBitShared.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.h b/contrib/native/client/src/protobuf/UserBitShared.pb.h
index c62bbf0..5a49bf5 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.h
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.h
@@ -72,11 +72,14 @@ enum DrillPBError_ErrorType {
   DrillPBError_ErrorType_RESOURCE = 7,
   DrillPBError_ErrorType_SYSTEM = 8,
   DrillPBError_ErrorType_UNSUPPORTED_OPERATION = 9,
-  DrillPBError_ErrorType_VALIDATION = 10
+  DrillPBError_ErrorType_VALIDATION = 10,
+  DrillPBError_ErrorType_EXECUTION_ERROR = 11,
+  DrillPBError_ErrorType_INTERNAL_ERROR = 12,
+  DrillPBError_ErrorType_UNSPECIFIED_ERROR = 13
 };
 bool DrillPBError_ErrorType_IsValid(int value);
 const DrillPBError_ErrorType DrillPBError_ErrorType_ErrorType_MIN = DrillPBError_ErrorType_CONNECTION;
-const DrillPBError_ErrorType DrillPBError_ErrorType_ErrorType_MAX = DrillPBError_ErrorType_VALIDATION;
+const DrillPBError_ErrorType DrillPBError_ErrorType_ErrorType_MAX = DrillPBError_ErrorType_UNSPECIFIED_ERROR;
 const int DrillPBError_ErrorType_ErrorType_ARRAYSIZE = DrillPBError_ErrorType_ErrorType_MAX + 1;
 
 const ::google::protobuf::EnumDescriptor* DrillPBError_ErrorType_descriptor();
@@ -115,11 +118,13 @@ enum QueryResult_QueryState {
   QueryResult_QueryState_CANCELED = 3,
   QueryResult_QueryState_FAILED = 4,
   QueryResult_QueryState_CANCELLATION_REQUESTED = 5,
-  QueryResult_QueryState_ENQUEUED = 6
+  QueryResult_QueryState_ENQUEUED = 6,
+  QueryResult_QueryState_PREPARING = 7,
+  QueryResult_QueryState_PLANNING = 8
 };
 bool QueryResult_QueryState_IsValid(int value);
 const QueryResult_QueryState QueryResult_QueryState_QueryState_MIN = QueryResult_QueryState_STARTING;
-const QueryResult_QueryState QueryResult_QueryState_QueryState_MAX = QueryResult_QueryState_ENQUEUED;
+const QueryResult_QueryState QueryResult_QueryState_QueryState_MAX = QueryResult_QueryState_PLANNING;
 const int QueryResult_QueryState_QueryState_ARRAYSIZE = QueryResult_QueryState_QueryState_MAX + 1;
 
 const ::google::protobuf::EnumDescriptor* QueryResult_QueryState_descriptor();
@@ -236,11 +241,13 @@ enum CoreOperatorType {
   WINDOW = 34,
   NESTED_LOOP_JOIN = 35,
   AVRO_SUB_SCAN = 36,
-  PCAP_SUB_SCAN = 37
+  PCAP_SUB_SCAN = 37,
+  KAFKA_SUB_SCAN = 38,
+  KUDU_SUB_SCAN = 39
 };
 bool CoreOperatorType_IsValid(int value);
 const CoreOperatorType CoreOperatorType_MIN = SINGLE_SENDER;
-const CoreOperatorType CoreOperatorType_MAX = PCAP_SUB_SCAN;
+const CoreOperatorType CoreOperatorType_MAX = KUDU_SUB_SCAN;
 const int CoreOperatorType_ARRAYSIZE = CoreOperatorType_MAX + 1;
 
 const ::google::protobuf::EnumDescriptor* CoreOperatorType_descriptor();
@@ -520,6 +527,9 @@ class DrillPBError : public ::google::protobuf::Message {
   static const ErrorType SYSTEM = DrillPBError_ErrorType_SYSTEM;
   static const ErrorType UNSUPPORTED_OPERATION = DrillPBError_ErrorType_UNSUPPORTED_OPERATION;
   static const ErrorType VALIDATION = DrillPBError_ErrorType_VALIDATION;
+  static const ErrorType EXECUTION_ERROR = DrillPBError_ErrorType_EXECUTION_ERROR;
+  static const ErrorType INTERNAL_ERROR = DrillPBError_ErrorType_INTERNAL_ERROR;
+  static const ErrorType UNSPECIFIED_ERROR = DrillPBError_ErrorType_UNSPECIFIED_ERROR;
   static inline bool ErrorType_IsValid(int value) {
     return DrillPBError_ErrorType_IsValid(value);
   }
@@ -1543,6 +1553,8 @@ class QueryResult : public ::google::protobuf::Message {
   static const QueryState FAILED = QueryResult_QueryState_FAILED;
   static const QueryState CANCELLATION_REQUESTED = QueryResult_QueryState_CANCELLATION_REQUESTED;
   static const QueryState ENQUEUED = QueryResult_QueryState_ENQUEUED;
+  static const QueryState PREPARING = QueryResult_QueryState_PREPARING;
+  static const QueryState PLANNING = QueryResult_QueryState_PLANNING;
   static inline bool QueryState_IsValid(int value) {
     return QueryResult_QueryState_IsValid(value);
   }


[5/7] drill git commit: DRILL-6278: Removed temp codegen directory in testing framework.

Posted by am...@apache.org.
DRILL-6278: Removed temp codegen directory in testing framework.

close apache/drill#1178


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7f645565
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7f645565
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7f645565

Branch: refs/heads/master
Commit: 7f645565cd68b1780e643bc20a43951610704008
Parents: 67710bb
Author: Timothy Farkas <ti...@apache.org>
Authored: Tue Mar 20 23:00:22 2018 -0700
Committer: Aman Sinha <as...@maprtech.com>
Committed: Thu Mar 29 23:22:25 2018 -0700

----------------------------------------------------------------------
 .../drill/exec/physical/impl/TopN/TopNBatchTest.java      |  6 +-----
 .../java/org/apache/drill/test/BaseDirTestWatcher.java    | 10 ----------
 .../test/java/org/apache/drill/test/BaseTestQuery.java    |  1 -
 .../test/java/org/apache/drill/test/ClusterFixture.java   |  1 -
 4 files changed, 1 insertion(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/7f645565/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TopN/TopNBatchTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TopN/TopNBatchTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TopN/TopNBatchTest.java
index 4860869..14f2ee8 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TopN/TopNBatchTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TopN/TopNBatchTest.java
@@ -63,8 +63,6 @@ public class TopNBatchTest extends PopUnitTestBase {
   @Test
   public void priorityQueueOrderingTest() throws Exception {
     Properties properties = new Properties();
-    properties.setProperty(ClassBuilder.CODE_DIR_OPTION, dirTestWatcher.getDir().getAbsolutePath());
-
     DrillConfig drillConfig = DrillConfig.create(properties);
 
     FieldReference expr = FieldReference.getWithQuotedRef("colA");
@@ -158,9 +156,7 @@ public class TopNBatchTest extends PopUnitTestBase {
    */
   @Test
   public void sortOneKeyAscending() throws Throwable {
-    ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
-      .configProperty(ClassBuilder.CODE_DIR_OPTION, dirTestWatcher.getDir().getAbsolutePath())
-      .configProperty(CodeCompiler.ENABLE_SAVE_CODE_FOR_DEBUG_TOPN, true);
+    ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher);
     try (ClusterFixture cluster = builder.build();
          ClientFixture client = cluster.clientFixture()) {
       TestBuilder testBuilder = new TestBuilder(new ClusterFixture.FixtureTestServices(client));

http://git-wip-us.apache.org/repos/asf/drill/blob/7f645565/exec/java-exec/src/test/java/org/apache/drill/test/BaseDirTestWatcher.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/BaseDirTestWatcher.java b/exec/java-exec/src/test/java/org/apache/drill/test/BaseDirTestWatcher.java
index d36423b..b595869 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/BaseDirTestWatcher.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/BaseDirTestWatcher.java
@@ -53,7 +53,6 @@ public class BaseDirTestWatcher extends DirTestWatcher {
     TEST_TMP // Corresponds to the directory that should be mapped to dfs.tmp
   }
 
-  private File codegenDir;
   private File tmpDir;
   private File storeDir;
   private File dfsTestTmpParentDir;
@@ -79,7 +78,6 @@ public class BaseDirTestWatcher extends DirTestWatcher {
   protected void starting(Description description) {
     super.starting(description);
 
-    codegenDir = makeSubDir(Paths.get("codegen"));
     rootDir = makeSubDir(Paths.get("root"));
     tmpDir = makeSubDir(Paths.get("tmp"));
     storeDir = makeSubDir(Paths.get("store"));
@@ -135,14 +133,6 @@ public class BaseDirTestWatcher extends DirTestWatcher {
   }
 
   /**
-   * Gets the temp directory that should be used to save generated code files.
-   * @return The temp directory that should be used to save generated code files.
-   */
-  public File getCodegenDir() {
-    return codegenDir;
-  }
-
-  /**
    * This methods creates a new directory which can be mapped to <b>dfs.tmp</b>.
    */
   public void newDfsTestTmpDir() {

http://git-wip-us.apache.org/repos/asf/drill/blob/7f645565/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java b/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java
index c3ecaf1..10cd94c 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java
@@ -160,7 +160,6 @@ public class BaseTestQuery extends ExecTest {
       props.put(propName, TEST_CONFIGURATIONS.getProperty(propName));
     }
 
-    props.setProperty(ClassBuilder.CODE_DIR_OPTION, dirTestWatcher.getCodegenDir().getAbsolutePath());
     props.setProperty(ExecConstants.DRILL_TMP_DIR, dirTestWatcher.getTmpDir().getAbsolutePath());
     props.setProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_PATH, dirTestWatcher.getStoreDir().getAbsolutePath());
 

http://git-wip-us.apache.org/repos/asf/drill/blob/7f645565/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
index 6dbdacd..d212014 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
@@ -516,7 +516,6 @@ public class ClusterFixture extends BaseFixture implements AutoCloseable {
          .sessionOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY, MAX_WIDTH_PER_NODE);
     Properties props = new Properties();
     props.putAll(ClusterFixture.TEST_CONFIGURATIONS);
-    props.setProperty(ClassBuilder.CODE_DIR_OPTION, dirTestWatcher.getCodegenDir().getAbsolutePath());
     props.setProperty(ExecConstants.DRILL_TMP_DIR, dirTestWatcher.getTmpDir().getAbsolutePath());
     props.setProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_PATH, dirTestWatcher.getStoreDir().getAbsolutePath());
 


[6/7] drill git commit: DRILL-6256: Remove references to java 7 from readme and other files

Posted by am...@apache.org.
DRILL-6256: Remove references to java 7 from readme and other files

close apache/drill#1172


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/03b245ef
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/03b245ef
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/03b245ef

Branch: refs/heads/master
Commit: 03b245ef4d028458fc6cc3588682d5ae9cc3bb33
Parents: 7f64556
Author: vladimir tkach <vo...@gmail.com>
Authored: Mon Mar 26 20:02:37 2018 +0300
Committer: Aman Sinha <as...@maprtech.com>
Committed: Thu Mar 29 23:23:17 2018 -0700

----------------------------------------------------------------------
 INSTALL.md                                 | 10 +++++-----
 distribution/src/resources/README.md       |  2 +-
 distribution/src/resources/drill-config.sh |  9 ++++-----
 exec/java-exec/src/main/sh/drill-config.sh |  2 +-
 exec/java-exec/src/main/sh/runbit          |  4 ++--
 pom.xml                                    |  2 +-
 6 files changed, 14 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/03b245ef/INSTALL.md
----------------------------------------------------------------------
diff --git a/INSTALL.md b/INSTALL.md
index 7c65611..72268e9 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -3,15 +3,15 @@
 ## Prerequisites
 
 Currently, the Apache Drill build process is known to work on Linux, Windows and OSX.  To build, you need to have the following software installed on your system to successfully complete a build. 
-  * Java 7
+  * Java 8
   * Maven 3.x
 
 ## Confirm settings
     # java -version
-    java version "1.7.0_09"
-    Java(TM) SE Runtime Environment (build 1.7.0_09-b05)
-    Java HotSpot(TM) 64-Bit Server VM (build 23.5-b02, mixed mode)
-    
+    java version "1.8.0_161"
+    Java(TM) SE Runtime Environment (build 1.8.0_161-b12)
+    Java HotSpot(TM) 64-Bit Server VM (build 25.161-b12, mixed mode)
+
     # mvn --version
     Apache Maven 3.0.3 (r1075438; 2011-02-28 09:31:09-0800)
 

http://git-wip-us.apache.org/repos/asf/drill/blob/03b245ef/distribution/src/resources/README.md
----------------------------------------------------------------------
diff --git a/distribution/src/resources/README.md b/distribution/src/resources/README.md
index 40feb29..bb1cd07 100644
--- a/distribution/src/resources/README.md
+++ b/distribution/src/resources/README.md
@@ -3,7 +3,7 @@
 ## Prerequisites
   
   * Linux, Windows or OSX 
-  * Oracle JDK 7 (JDK, not JRE)
+  * Oracle/OpenJDK 8 (JDK, not JRE)
 
 Additional requirements when running in clustered mode:
   * Hadoop 2.3+ distribution of Hadoop (such as Apache or MapR)

http://git-wip-us.apache.org/repos/asf/drill/blob/03b245ef/distribution/src/resources/drill-config.sh
----------------------------------------------------------------------
diff --git a/distribution/src/resources/drill-config.sh b/distribution/src/resources/drill-config.sh
index 55d032d..e3eaa64 100644
--- a/distribution/src/resources/drill-config.sh
+++ b/distribution/src/resources/drill-config.sh
@@ -123,10 +123,10 @@ if [ ! -e "$JAVA" ]; then
   fatal_error "Java not found at JAVA_HOME=$JAVA_HOME."
 fi
 
-# Ensure that Java version is at least 1.7
+# Ensure that Java version is at least 1.8
 "$JAVA" -version 2>&1 | grep "version" | egrep -e "1\.4|1\.5|1\.6|1\.7" > /dev/null
 if [ $? -eq 0 ]; then
-  fatal_error "Java 1.8 or later is required to run Apache Drill."
+  fatal_error "Java 1.8 is required to run Apache Drill."
 fi
 
 # Check if a file exists and has relevant lines for execution
@@ -214,9 +214,8 @@ fi
 
 export SQLLINE_JAVA_OPTS=${SQLLINE_JAVA_OPTS:-""}
 
-# Class unloading is disabled by default in Java 7
-# http://hg.openjdk.java.net/jdk7u/jdk7u60/hotspot/file/tip/src/share/vm/runtime/globals.hpp#l1622
-export SERVER_GC_OPTS="$SERVER_GC_OPTS -XX:+CMSClassUnloadingEnabled -XX:+UseG1GC"
+
+export SERVER_GC_OPTS="$SERVER_GC_OPTS -XX:+UseG1GC"
 
 # No GC options by default for SQLLine
 export CLIENT_GC_OPTS=${CLIENT_GC_OPTS:-""}

http://git-wip-us.apache.org/repos/asf/drill/blob/03b245ef/exec/java-exec/src/main/sh/drill-config.sh
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/sh/drill-config.sh b/exec/java-exec/src/main/sh/drill-config.sh
index 20102fc..fa62da8 100644
--- a/exec/java-exec/src/main/sh/drill-config.sh
+++ b/exec/java-exec/src/main/sh/drill-config.sh
@@ -88,7 +88,7 @@ if [ -z "$JAVA_HOME" ]; then
 +======================================================================+
 |      Error: JAVA_HOME is not set and Java could not be found         |
 +----------------------------------------------------------------------+
-| Drill requires Java 1.7 or later.                                    |
+| Drill requires Java 1.8                                    |
 +======================================================================+
 EOF
     exit 1

http://git-wip-us.apache.org/repos/asf/drill/blob/03b245ef/exec/java-exec/src/main/sh/runbit
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/sh/runbit b/exec/java-exec/src/main/sh/runbit
index 4c4aa21..0e4af4d 100755
--- a/exec/java-exec/src/main/sh/runbit
+++ b/exec/java-exec/src/main/sh/runbit
@@ -29,9 +29,9 @@ else
   exit 1
 fi
 
-$JAVA -version 2>&1 | grep "version" | egrep -e "1.7" > /dev/null
+$JAVA -version 2>&1 | grep "version" | egrep -e "1.8" > /dev/null
 if [ $? -ne 0 ]; then
-  echo "Java 1.7 is required to run Apache Drill."
+  echo "Java 1.8 is required to run Apache Drill."
   exit 1
 fi
 

http://git-wip-us.apache.org/repos/asf/drill/blob/03b245ef/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 2d951ad..f07beaf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -357,7 +357,7 @@
                   <version>[3.0.4,4)</version>
                 </requireMavenVersion>
                 <requireJavaVersion>
-                  <version>[1.7,1.9)</version>
+                  <version>[1.8,1.9)</version>
                 </requireJavaVersion>
               </rules>
             </configuration>


[7/7] drill git commit: DRILL-6125: Fix possible memory leak when query is cancelled or finished.

Posted by am...@apache.org.
DRILL-6125: Fix possible memory leak when query is cancelled or finished.

close apache/drill#1105


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/a264e7fe
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/a264e7fe
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/a264e7fe

Branch: refs/heads/master
Commit: a264e7feb1d02ffd5762bb1f652ea22d17aa5243
Parents: 03b245e
Author: Timothy Farkas <ti...@apache.org>
Authored: Tue Jan 30 15:55:41 2018 -0800
Committer: Aman Sinha <as...@maprtech.com>
Committed: Thu Mar 29 23:24:09 2018 -0700

----------------------------------------------------------------------
 .../drill/exec/physical/impl/RootExec.java      |  23 ++-
 .../PartitionSenderRootExec.java                |  32 ++--
 .../exec/work/fragment/FragmentExecutor.java    | 179 ++++++++++++++-----
 3 files changed, 160 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/a264e7fe/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/RootExec.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/RootExec.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/RootExec.java
index 5e366fb..ddeb3e8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/RootExec.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/RootExec.java
@@ -20,19 +20,28 @@ package org.apache.drill.exec.physical.impl;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
 
 /**
- * A FragmentRoot is a node which is the last processing node in a query plan. FragmentTerminals include Exchange
- * output nodes and storage nodes.  They are there driving force behind the completion of a query.
+ * <h2>Functionality</h2>
+ * <p>
+ *   A FragmentRoot is a node which is the last processing node in a query plan. FragmentTerminals include Exchange
+ *   output nodes and storage nodes.  They are there driving force behind the completion of a query.
+ * </p>
+ * <h2>Assumptions</h2>
+ * <p>
+ *   All implementations of {@link RootExec} assume that all their methods are called by the same thread.
+ * </p>
  */
 public interface RootExec extends AutoCloseable {
   /**
    * Do the next batch of work.
-   * @return Whether or not additional batches of work are necessary.  False means that this fragment is done.
+   * @return Whether or not additional batches of work are necessary. False means that this fragment is done.
    */
-  public boolean next();
+  boolean next();
 
   /**
-   * Inform sender that receiving fragment is finished and doesn't need any more data
-   * @param handle
+   * Inform sender that receiving fragment is finished and doesn't need any more data. This can be called multiple
+   * times (once for each downstream receiver). If all receivers are finished then a subsequent call to {@link #next()}
+   * will return false.
+   * @param handle The handle pointing to the downstream receiver that does not need anymore data.
    */
-  public void receivingFragmentFinished(FragmentHandle handle);
+  void receivingFragmentFinished(FragmentHandle handle);
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/a264e7fe/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
index 25be50a..7e76238 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
@@ -65,14 +65,13 @@ public class PartitionSenderRootExec extends BaseRootExec {
   private PartitionerDecorator partitioner;
 
   private ExchangeFragmentContext context;
-  private boolean ok = true;
   private final int outGoingBatchCount;
   private final HashPartitionSender popConfig;
   private final double cost;
 
   private final AtomicIntegerArray remainingReceivers;
   private final AtomicInteger remaingReceiverCount;
-  private volatile boolean done = false;
+  private boolean done = false;
   private boolean first = true;
   private boolean closeIncoming;
 
@@ -146,11 +145,8 @@ public class PartitionSenderRootExec extends BaseRootExec {
 
   @Override
   public boolean innerNext() {
-    if (!ok) {
-      return false;
-    }
-
     IterOutcome out;
+
     if (!done) {
       out = next(incoming);
     } else {
@@ -252,13 +248,11 @@ public class PartitionSenderRootExec extends BaseRootExec {
             startIndex, endIndex);
       }
 
-      synchronized (this) {
-        partitioner = new PartitionerDecorator(subPartitioners, stats, context);
-        for (int index = 0; index < terminations.size(); index++) {
-          partitioner.getOutgoingBatches(terminations.buffer[index]).terminate();
-        }
-        terminations.clear();
+      partitioner = new PartitionerDecorator(subPartitioners, stats, context);
+      for (int index = 0; index < terminations.size(); index++) {
+        partitioner.getOutgoingBatches(terminations.buffer[index]).terminate();
       }
+      terminations.clear();
 
       success = true;
     } finally {
@@ -328,12 +322,10 @@ public class PartitionSenderRootExec extends BaseRootExec {
   public void receivingFragmentFinished(FragmentHandle handle) {
     final int id = handle.getMinorFragmentId();
     if (remainingReceivers.compareAndSet(id, 0, 1)) {
-      synchronized (this) {
-        if (partitioner == null) {
-          terminations.add(id);
-        } else {
-          partitioner.getOutgoingBatches(id).terminate();
-        }
+      if (partitioner == null) {
+        terminations.add(id);
+      } else {
+        partitioner.getOutgoingBatches(id).terminate();
       }
 
       int remaining = remaingReceiverCount.decrementAndGet();
@@ -347,7 +339,7 @@ public class PartitionSenderRootExec extends BaseRootExec {
   public void close() throws Exception {
     logger.debug("Partition sender stopping.");
     super.close();
-    ok = false;
+
     if (partitioner != null) {
       updateAggregateStats();
       partitioner.clear();
@@ -358,7 +350,7 @@ public class PartitionSenderRootExec extends BaseRootExec {
     }
   }
 
-  public void sendEmptyBatch(boolean isLast) {
+  private void sendEmptyBatch(boolean isLast) {
     BatchSchema schema = incoming.getSchema();
     if (schema == null) {
       // If the incoming batch has no schema (possible when there are no input records),

http://git-wip-us.apache.org/repos/asf/drill/blob/a264e7fe/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/FragmentExecutor.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/FragmentExecutor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/FragmentExecutor.java
index 4f43dc1..efdb96a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/FragmentExecutor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/fragment/FragmentExecutor.java
@@ -19,7 +19,9 @@ package org.apache.drill.exec.work.fragment;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Queue;
 import java.util.Set;
+import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -31,7 +33,6 @@ import org.apache.drill.exec.coord.ClusterCoordinator;
 import org.apache.drill.exec.exception.OutOfMemoryException;
 import org.apache.drill.exec.ops.ExecutorFragmentContext;
 import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.ops.FragmentContextImpl;
 import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.physical.impl.ImplCreator;
 import org.apache.drill.exec.physical.impl.RootExec;
@@ -49,14 +50,58 @@ import org.apache.drill.exec.work.foreman.DrillbitStatusListener;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
- * Responsible for running a single fragment on a single Drillbit. Listens/responds to status request
- * and cancellation messages.
+ * <h2>Overview</h2>
+ * <p>
+ *   Responsible for running a single fragment on a single Drillbit. Listens/responds to status request and cancellation messages.
+ * </p>
+ * <h2>Theory of Operation</h2>
+ * <p>
+ *  The {@link FragmentExecutor} runs a fragment's {@link RootExec} in the {@link FragmentExecutor#run()} method in a single thread. While a fragment is running
+ *  it may be subject to termination requests. The {@link FragmentExecutor} is responsible for gracefully handling termination requests for the {@link RootExec}. There
+ *  are two types of termination messages:
+ *  <ol>
+ *    <li><b>Cancellation Request:</b> This signals that the fragment and therefore the {@link RootExec} need to terminate immediately.</li>
+ *    <li><b>Receiver Finished:</b> This signals that a downstream receiver no longer needs any more data. A fragment may receive multiple receiver finished requests
+ *    (one for each downstream receiver). The {@link RootExec} will only terminate once it has received {@link FragmentExecutor.EventType#RECEIVER_FINISHED} messages
+ *    for all downstream receivers.</li>
+ *  </ol>
+ * </p>
+ * <p>
+ *   The {@link FragmentExecutor} processes termination requests appropriately for the {@link RootExec}. A <b>Cancellation Request</b> is signalled when
+ *   {@link FragmentExecutor#cancel()} is called. A <b>Receiver Finished</b> event is signalled when {@link FragmentExecutor#receivingFragmentFinished(FragmentHandle)} is
+ *   called. The way in which these signals are handled is the following:
+ * </p>
+ * <h3>Cancellation Request</h3>
+ * <p>
+ *   There are two ways in which a cancellation request can be handled when {@link FragmentExecutor#cancel()} is called.
+ *   <ol>
+ *     <li>The Cancellation Request is received before the {@link RootExec} for the fragment is even started. In this case we can cleanup resources allocated for the fragment
+ *     and never start a {@link RootExec}</li>
+ *     <li>The Cancellation Request is received after the {@link RootExec} for the fragment is started. In this case the cancellation request is sent to the
+ *     {@link FragmentEventProcessor}. If this is not the first cancellation request it is ignored. If this is the first cancellation request the {@link RootExec} for this
+ *     fragment is terminated by interrupting it. Then the {@link FragmentExecutor#run()} thread proceeds to cleanup resources normally</li>
+ *   </ol>
+ * </p>
+ * <h3>Receiver Finished</h3>
+ * <p>
+ *  When {@link FragmentExecutor#receivingFragmentFinished(FragmentHandle)} is called, the message is passed to the {@link FragmentEventProcessor} if we
+ *  did not already receive a Cancellation request. Then the finished message is queued in {@link FragmentExecutor#receiverFinishedQueue}. The {@link FragmentExecutor#run()} polls
+ *  {@link FragmentExecutor#receiverFinishedQueue} and signals the {@link RootExec} with {@link RootExec#receivingFragmentFinished(FragmentHandle)} appropriately.
+ * </p>
+ * <h2>Possible Design Flaws / Poorly Defined Behavior</h2>
+ * <p>
+ *   There are still a few aspects of the {@link FragmentExecutor} design that are not clear.
+ *   <ol>
+ *     <li>If we get a <b>Receiver Finished</b> message for one downstream receiver, will we eventually get one from every downstream receiver?</li>
+ *     <li>What happens when we process a <b>Receiver Finished</b> message for some (but not all) downstream receivers and then we cancel the fragment?</li>
+ *     <li>What happens when we process a <b>Receiver Finished</b> message for some (but not all) downstream receivers and then we run out of data from the upstream?</li>
+ *   </ol>
+ * </p>
  */
 public class FragmentExecutor implements Runnable {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FragmentExecutor.class);
   private static final ControlsInjector injector = ControlsInjectorFactory.getInjector(FragmentExecutor.class);
 
-  private final AtomicBoolean hasCloseoutThread = new AtomicBoolean(false);
   private final String fragmentName;
   private final ExecutorFragmentContext fragmentContext;
   private final FragmentStatusReporter statusReporter;
@@ -66,6 +111,11 @@ public class FragmentExecutor implements Runnable {
 
   private volatile RootExec root;
   private final AtomicReference<FragmentState> fragmentState = new AtomicReference<>(FragmentState.AWAITING_ALLOCATION);
+  /**
+   * Holds all of the messages sent by downstream receivers that have finished. The {@link FragmentExecutor#run()} thread reads from this queue and passes the
+   * finished messages to the fragment's {@link RootExec} via the {@link RootExec#receivingFragmentFinished(FragmentHandle)} method.
+   */
+  private final Queue<FragmentHandle> receiverFinishedQueue = new ConcurrentLinkedQueue<>();
   private final FragmentEventProcessor eventProcessor = new FragmentEventProcessor();
 
   // Thread that is currently executing the Fragment. Value is null if the fragment hasn't started running or finished
@@ -135,12 +185,16 @@ public class FragmentExecutor implements Runnable {
   }
 
   /**
+   * <p>
    * Cancel the execution of this fragment is in an appropriate state. Messages come from external.
-   * NOTE that this will be called from threads *other* than the one running this runnable(),
+   * </p>
+   * <p>
+   * <b>Note:</b> This will be called from threads <b>other</b> than the one running this runnable(),
    * so we need to be careful about the state transitions that can result.
+   * </p>
    */
   public void cancel() {
-    final boolean thisIsOnlyThread = hasCloseoutThread.compareAndSet(false, true);
+    final boolean thisIsOnlyThread = myThreadRef.compareAndSet(null, Thread.currentThread());
 
     if (thisIsOnlyThread) {
       eventProcessor.cancelAndFinish();
@@ -182,13 +236,12 @@ public class FragmentExecutor implements Runnable {
   @SuppressWarnings("resource")
   @Override
   public void run() {
-    // if a cancel thread has already entered this executor, we have not reason to continue.
-    if (!hasCloseoutThread.compareAndSet(false, true)) {
+    final Thread myThread = Thread.currentThread();
+
+    if (!myThreadRef.compareAndSet(null, myThread)) {
       return;
     }
 
-    final Thread myThread = Thread.currentThread();
-    myThreadRef.set(myThread);
     final String originalThreadName = myThread.getName();
     final FragmentHandle fragmentHandle = fragmentContext.getHandle();
     final ClusterCoordinator clusterCoordinator = fragmentContext.getClusterCoordinator();
@@ -203,10 +256,10 @@ public class FragmentExecutor implements Runnable {
       final FragmentRoot rootOperator = this.rootOperator != null ? this.rootOperator :
           fragmentContext.getPlanReader().readFragmentRoot(fragment.getFragmentJson());
 
-          root = ImplCreator.getExec(fragmentContext, rootOperator);
-          if (root == null) {
-            return;
-          }
+      root = ImplCreator.getExec(fragmentContext, rootOperator);
+      if (root == null) {
+        return;
+      }
 
       clusterCoordinator.addDrillbitStatusListener(drillbitStatusListener);
       updateState(FragmentState.RUNNING);
@@ -227,11 +280,19 @@ public class FragmentExecutor implements Runnable {
         @Override
         public Void run() throws Exception {
           injector.injectChecked(fragmentContext.getExecutionControls(), "fragment-execution", IOException.class);
-          /*
-           * Run the query until root.next returns false OR we no longer need to continue.
-           */
-          while (shouldContinue() && root.next()) {
-            // loop
+
+          while (shouldContinue()) {
+            // Fragment is not cancelled
+
+            for (FragmentHandle fragmentHandle; (fragmentHandle = receiverFinishedQueue.poll()) != null;) {
+              // See if we have any finished requests. If so execute them.
+              root.receivingFragmentFinished(fragmentHandle);
+            }
+
+            if (!root.next()) {
+              // Fragment has processed all of its data
+              break;
+            }
           }
 
           return null;
@@ -245,19 +306,17 @@ public class FragmentExecutor implements Runnable {
         // we have a heap out of memory error. The JVM in unstable, exit.
         CatastrophicFailure.exit(e, "Unable to handle out of memory condition in FragmentExecutor.", -2);
       }
+    } catch (InterruptedException e) {
+      // Swallow interrupted exceptions since we intentionally interrupt the root when cancelling a query
+      logger.trace("Interruped root: {}", e);
     } catch (Throwable t) {
       fail(t);
     } finally {
 
-      // no longer allow this thread to be interrupted. We synchronize here to make sure that cancel can't set an
-      // interruption after we have moved beyond this block.
-      synchronized (myThreadRef) {
-        myThreadRef.set(null);
-        Thread.interrupted();
-      }
-
-      // Make sure the event processor is started at least once
-      eventProcessor.start();
+      // Don't process any more termination requests, we are done.
+      eventProcessor.terminate();
+      // Clear the interrupt flag if it is set.
+      Thread.interrupted();
 
       // here we could be in FAILED, RUNNING, or CANCELLATION_REQUESTED
       // FAILED state will be because of any Exception in execution loop root.next()
@@ -475,6 +534,7 @@ public class FragmentExecutor implements Runnable {
    * This is especially important as fragments can take longer to start
    */
   private class FragmentEventProcessor extends EventProcessor<FragmentEvent> {
+    private AtomicBoolean terminate = new AtomicBoolean(false);
 
     void cancel() {
       sendEvent(new FragmentEvent(EventType.CANCEL, null));
@@ -488,47 +548,72 @@ public class FragmentExecutor implements Runnable {
       sendEvent(new FragmentEvent(EventType.RECEIVER_FINISHED, handle));
     }
 
+    /**
+     * Tell the {@link FragmentEventProcessor} not to process any more events. This keeps stray cancellation requests
+     * from being processed after the root has finished running and interrupts in the root thread have been cleared.
+     */
+    public void terminate() {
+      terminate.set(true);
+    }
+
     @Override
     protected void processEvent(FragmentEvent event) {
+      if (event.type.equals(EventType.RECEIVER_FINISHED)) {
+        // Finish request
+        if (terminate.get()) {
+          // We have already received a cancellation or we have terminated the event processor. Do not process any more finish requests.
+          return;
+        }
+      } else {
+        // Cancel request
+        if (!terminate.compareAndSet(false, true)) {
+          // We have already received a cancellation or we have terminated the event processor. Do not process any more cancellation requests.
+          // This prevents the root thread from being interrupted at an inappropriate time.
+          return;
+        }
+      }
+
       switch (event.type) {
         case CANCEL:
-          /*
-           * We set the cancel requested flag but the actual cancellation is managed by the run() loop, if called.
-           */
+          // We set the cancel requested flag but the actual cancellation is managed by the run() loop, if called.
           updateState(FragmentState.CANCELLATION_REQUESTED);
-
-          /*
-           * Interrupt the thread so that it exits from any blocking operation it could be executing currently. We
-           * synchronize here to ensure we don't accidentally create a race condition where we interrupt the close out
-           * procedure of the main thread.
-          */
-          synchronized (myThreadRef) {
-            final Thread myThread = myThreadRef.get();
-            if (myThread != null) {
-              logger.debug("Interrupting fragment thread {}", myThread.getName());
-              myThread.interrupt();
-            }
-          }
+          // The root was started so we have to interrupt it in case it is performing a blocking operation.
+          killThread();
           break;
-
         case CANCEL_AND_FINISH:
+          // In this case the root was never started so we do not have to interrupt the thread.
           updateState(FragmentState.CANCELLATION_REQUESTED);
+          // The FragmentExecutor#run() loop will not execute in this case so we have to cleanup resources here
           cleanup(FragmentState.FINISHED);
           break;
-
         case RECEIVER_FINISHED:
           assert event.handle != null : "RECEIVER_FINISHED event must have a handle";
           if (root != null) {
             logger.info("Applying request for early sender termination for {} -> {}.",
               QueryIdHelper.getQueryIdentifier(getContext().getHandle()),
               QueryIdHelper.getFragmentId(event.handle));
-            root.receivingFragmentFinished(event.handle);
+
+            receiverFinishedQueue.add(event.handle);
           } else {
             logger.warn("Dropping request for early fragment termination for path {} -> {} as no root exec exists.",
               QueryIdHelper.getFragmentId(getContext().getHandle()), QueryIdHelper.getFragmentId(event.handle));
           }
+          // Note we do not terminate the event processor in this case since we can receive multiple RECEIVER_FINISHED
+          // events. One for each downstream receiver.
           break;
       }
     }
+
+    /*
+     * Interrupt the thread so that it exits from any blocking operation it could be executing currently. We
+     * synchronize here to ensure we don't accidentally create a race condition where we interrupt the close out
+     * procedure of the main thread.
+    */
+    private void killThread() {
+      // myThreadRef must contain a non-null reference at this point
+      final Thread myThread = myThreadRef.get();
+      logger.debug("Interrupting fragment thread {}", myThread.getName());
+      myThread.interrupt();
+    }
   }
 }


[3/7] drill git commit: DRILL-5937: drill-module.conf : Changed timeout to 30 seconds, ExecConstant.java : Changed comment

Posted by am...@apache.org.
DRILL-5937: drill-module.conf : Changed timeout to 30 seconds, ExecConstant.java : Changed comment

DRILL-5937: ExecConstant.java : removed comment

DRILL-5937: CheckStyle fix

close apache/drill#1190


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7088bfe4
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7088bfe4
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7088bfe4

Branch: refs/heads/master
Commit: 7088bfe4bd6e2379ba21b0c48f7280a77d2c3b83
Parents: ea643bf
Author: Pushpendra Jaiswal <pu...@paysafe.com>
Authored: Tue Mar 27 13:16:23 2018 +0530
Committer: Aman Sinha <as...@maprtech.com>
Committed: Thu Mar 29 23:20:30 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/drill/exec/ExecConstants.java       | 4 ----
 exec/java-exec/src/main/resources/drill-module.conf              | 2 +-
 2 files changed, 1 insertion(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/7088bfe4/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 34aec1b..77fa211 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -561,10 +561,6 @@ public final class ExecConstants {
   public static final String CODE_GEN_EXP_IN_METHOD_SIZE = "exec.java.compiler.exp_in_method_size";
   public static final LongValidator CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR = new LongValidator(CODE_GEN_EXP_IN_METHOD_SIZE);
 
-  /**
-   * Timeout for create prepare statement request. If the request exceeds this timeout, then request is timed out.
-   * Default value is 10mins.
-   */
   public static final String CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS = "prepare.statement.create_timeout_ms";
   public static final OptionValidator CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS_VALIDATOR =
       new PositiveLongValidator(CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS, Integer.MAX_VALUE);

http://git-wip-us.apache.org/repos/asf/drill/blob/7088bfe4/exec/java-exec/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/resources/drill-module.conf b/exec/java-exec/src/main/resources/drill-module.conf
index a227e0d..9af09bc 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -514,7 +514,7 @@ drill.exec.options: {
     # it is dynamically computed based on cpu_load_average
     planner.width.max_per_node: 0,
     planner.width.max_per_query: 1000,
-    prepare.statement.create_timeout_ms: 10000,
+    prepare.statement.create_timeout_ms: 30000,
     security.admin.user_groups: "%drill_process_user_groups%",
     security.admin.users: "%drill_process_user%",
     store.format: "parquet",


[2/7] drill git commit: DRILL-6299: Fixed a filter pushed down issue when a column doesn't have stats

Posted by am...@apache.org.
DRILL-6299: Fixed a filter pushed down issue when a column doesn't have stats

close apache/drill#1192


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/ea643bfe
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/ea643bfe
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/ea643bfe

Branch: refs/heads/master
Commit: ea643bfebeff8991d4e43fa8762773076087d0df
Parents: bfc86f1
Author: Salim Achouche <sa...@gmail.com>
Authored: Wed Mar 28 12:08:25 2018 -0700
Committer: Aman Sinha <as...@maprtech.com>
Committed: Thu Mar 29 23:18:32 2018 -0700

----------------------------------------------------------------------
 .../drill/exec/expr/stat/ParquetIsPredicates.java  | 17 ++++++-----------
 .../exec/expr/stat/ParquetPredicatesHelper.java    |  9 +++++++++
 2 files changed, 15 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/ea643bfe/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetIsPredicates.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetIsPredicates.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetIsPredicates.java
index c6f9b2f..a58ce7c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetIsPredicates.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetIsPredicates.java
@@ -62,7 +62,7 @@ public class ParquetIsPredicates {
     public boolean canDrop(RangeExprEvaluator evaluator) {
       Statistics exprStat = expr.accept(evaluator, null);
 
-      if (exprStat == null) {
+      if (!ParquetPredicatesHelper.hasStats(exprStat)) {
         return false;
       }
 
@@ -87,8 +87,7 @@ public class ParquetIsPredicates {
     public boolean canDrop(RangeExprEvaluator evaluator) {
       Statistics exprStat = expr.accept(evaluator, null);
 
-      if (exprStat == null ||
-          exprStat.isEmpty()) {
+      if (!ParquetPredicatesHelper.hasStats(exprStat)) {
         return false;
       }
 
@@ -113,8 +112,7 @@ public class ParquetIsPredicates {
     public boolean canDrop(RangeExprEvaluator evaluator) {
       Statistics exprStat = expr.accept(evaluator, null);
 
-      if (exprStat == null ||
-          exprStat.isEmpty()) {
+      if (!ParquetPredicatesHelper.hasStats(exprStat)) {
         return false;
       }
 
@@ -140,8 +138,7 @@ public class ParquetIsPredicates {
     public boolean canDrop(RangeExprEvaluator evaluator) {
       Statistics exprStat = expr.accept(evaluator, null);
 
-      if (exprStat == null ||
-          exprStat.isEmpty()) {
+      if (!ParquetPredicatesHelper.hasStats(exprStat)) {
         return false;
       }
 
@@ -167,8 +164,7 @@ public class ParquetIsPredicates {
     public boolean canDrop(RangeExprEvaluator evaluator) {
       Statistics exprStat = expr.accept(evaluator, null);
 
-      if (exprStat == null ||
-          exprStat.isEmpty()) {
+      if (!ParquetPredicatesHelper.hasStats(exprStat)) {
         return false;
       }
 
@@ -193,8 +189,7 @@ public class ParquetIsPredicates {
     public boolean canDrop(RangeExprEvaluator evaluator) {
       Statistics exprStat = expr.accept(evaluator, null);
 
-      if (exprStat == null ||
-          exprStat.isEmpty()) {
+      if (!ParquetPredicatesHelper.hasStats(exprStat)) {
         return false;
       }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/ea643bfe/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetPredicatesHelper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetPredicatesHelper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetPredicatesHelper.java
index ac82d65..e43acd3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetPredicatesHelper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/stat/ParquetPredicatesHelper.java
@@ -22,9 +22,18 @@ import org.apache.parquet.column.statistics.Statistics;
 /**
  * Parquet predicates class helper for filter pushdown.
  */
+@SuppressWarnings("rawtypes")
 public class ParquetPredicatesHelper {
 
   /**
+   * @param stat statistics object
+   * @return true if the input stat object has valid statistics; false otherwise
+   */
+  public static boolean hasStats(Statistics stat) {
+    return stat != null && !stat.isEmpty();
+  }
+
+  /**
    * Checks that column chunk's statistics has only nulls
    *
    * @param stat parquet column statistics


[4/7] drill git commit: DRILL-6254: IllegalArgumentException: the requested size must be non-negative

Posted by am...@apache.org.
DRILL-6254: IllegalArgumentException: the requested size must be non-negative

close apache/drill#1179


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/67710bba
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/67710bba
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/67710bba

Branch: refs/heads/master
Commit: 67710bba7cdbc05428df7390bad8639b099769fc
Parents: 7088bfe
Author: Padma Penumarthy <pp...@yahoo.com>
Authored: Wed Mar 21 13:39:43 2018 -0700
Committer: Aman Sinha <as...@maprtech.com>
Committed: Thu Mar 29 23:21:50 2018 -0700

----------------------------------------------------------------------
 .../drill/exec/physical/impl/flatten/FlattenRecordBatch.java    | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/67710bba/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/flatten/FlattenRecordBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/flatten/FlattenRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/flatten/FlattenRecordBatch.java
index 9dd1770..7509809 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/flatten/FlattenRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/flatten/FlattenRecordBatch.java
@@ -237,7 +237,10 @@ public class FlattenRecordBatch extends AbstractSingleRecordBatch<FlattenPOP> {
 
   private void handleRemainder() {
     int remainingRecordCount = flattener.getFlattenField().getAccessor().getInnerValueCount() - remainderIndex;
-    if (!doAlloc(remainingRecordCount)) {
+
+    // remainingRecordCount can be much higher than number of rows we will have in outgoing batch.
+    // Do memory allocation only for number of rows we are going to have in the batch.
+    if (!doAlloc(Math.min(remainingRecordCount, flattenMemoryManager.getOutputRowCount()))) {
       outOfMemory = true;
       return;
     }