Posted to commits@kudu.apache.org by gr...@apache.org on 2019/06/10 18:29:44 UTC

[kudu] branch master updated (89f6ad4 -> 71b0940)

This is an automated email from the ASF dual-hosted git repository.

granthenke pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git.


    from 89f6ad4  [backup] Add missing log4j test dependency
     new 4b60b52  [itbll] make error/failure easier to detect
     new 71b0940  [hms] Adjust storage handler package

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../kudu/hive/metastore/KuduMetastorePlugin.java      | 10 +++++++---
 .../spark/tools/IntegrationTestBigLinkedList.scala    |  2 +-
 src/kudu/hms/hms_catalog.cc                           |  5 ++---
 src/kudu/hms/hms_client.cc                            | 19 ++++++++++++++++++-
 src/kudu/hms/hms_client.h                             |  5 +++++
 src/kudu/master/hms_notification_log_listener.cc      |  6 +++---
 6 files changed, 36 insertions(+), 11 deletions(-)


[kudu] 02/02: [hms] Adjust storage handler package

Posted by gr...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

granthenke pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 71b094000cb3569d49f0fdf016ae5d17ff1f119e
Author: Grant Henke <gr...@apache.org>
AuthorDate: Thu Jun 6 11:50:19 2019 -0500

    [hms] Adjust storage handler package
    
    This patch changes the new KuduStorageHandler
    package from “org.apache.kudu.hive” to
    “org.apache.hadoop.hive.kudu”.
    
    This is being done to ensure the stand-in storage handler
    can be a real storage handler when a Hive integration
    is added in the future. The “org.apache.hadoop.hive”
    package is the standard package all Hive storage
    handlers live under.
    
    Note: For the sake of including this in the Impala
    project without breaking things, we temporarily
    maintain support for the old storage handler. It will
    be removed in a follow-up commit before a release.
    
    Change-Id: I66b314c1c8b56785005d6e0d8b679e19219494fe
    Reviewed-on: http://gerrit.cloudera.org:8080/13540
    Reviewed-by: Hao Hao <ha...@cloudera.com>
    Tested-by: Kudu Jenkins
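
[Editor's note] For readers skimming the diff below, here is a minimal, self-contained
sketch of the check the patch ends up with: both the new and the temporarily supported
old storage handler class names are accepted. The handler names are taken from the
patch; the literal parameter key "storage_handler" is an assumption (the patch refers
to it via hive_metastoreConstants.META_TABLE_STORAGE / kStorageHandlerKey), and the
class and method names here are illustrative only.

import java.util.HashMap;
import java.util.Map;

public class KuduStorageHandlerCheck {
  // Handler class names from the patch: the new package and the temporary old one.
  static final String KUDU_STORAGE_HANDLER =
      "org.apache.hadoop.hive.kudu.KuduStorageHandler";
  static final String TEMP_KUDU_STORAGE_HANDLER =
      "org.apache.kudu.hive.KuduStorageHandler";
  // Assumption: the HMS table parameter key behind
  // hive_metastoreConstants.META_TABLE_STORAGE.
  static final String META_TABLE_STORAGE = "storage_handler";

  // Mirrors the isKuduTable() logic in the Java hunk below.
  static boolean isKuduTable(Map<String, String> tableParameters) {
    String handler = tableParameters.get(META_TABLE_STORAGE);
    return KUDU_STORAGE_HANDLER.equals(handler) ||
        TEMP_KUDU_STORAGE_HANDLER.equals(handler);
  }

  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    params.put(META_TABLE_STORAGE, TEMP_KUDU_STORAGE_HANDLER);
    // Prints "true" until support for the old handler name is dropped.
    System.out.println(isKuduTable(params));
  }
}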
---
 .../kudu/hive/metastore/KuduMetastorePlugin.java      | 10 +++++++---
 src/kudu/hms/hms_catalog.cc                           |  5 ++---
 src/kudu/hms/hms_client.cc                            | 19 ++++++++++++++++++-
 src/kudu/hms/hms_client.h                             |  5 +++++
 src/kudu/master/hms_notification_log_listener.cc      |  6 +++---
 5 files changed, 35 insertions(+), 10 deletions(-)

diff --git a/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java b/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java
index f1a1f19..d40238e 100644
--- a/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java
+++ b/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java
@@ -66,8 +66,11 @@ import org.apache.hadoop.hive.metastore.events.ListenerEvent;
  */
 public class KuduMetastorePlugin extends MetaStoreEventListener {
 
+  // TODO(ghenke): Remove this after Impala integration of the adjusted KUDU_STORAGE_HANDLER.
   @VisibleForTesting
-  static final String KUDU_STORAGE_HANDLER = "org.apache.kudu.hive.KuduStorageHandler";
+  static final String TEMP_KUDU_STORAGE_HANDLER = "org.apache.kudu.hive.KuduStorageHandler";
+  @VisibleForTesting
+  static final String KUDU_STORAGE_HANDLER = "org.apache.hadoop.hive.kudu.KuduStorageHandler";
   @VisibleForTesting
   static final String LEGACY_KUDU_STORAGE_HANDLER = "com.cloudera.kudu.hive.KuduStorageHandler";
   @VisibleForTesting
@@ -237,8 +240,9 @@ public class KuduMetastorePlugin extends MetaStoreEventListener {
    * @return {@code true} if the table is a Kudu table, otherwise {@code false}
    */
   private boolean isKuduTable(Table table) {
-    return KUDU_STORAGE_HANDLER.equals(table.getParameters()
-        .get(hive_metastoreConstants.META_TABLE_STORAGE));
+    String storageHandler = table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE);
+    return KUDU_STORAGE_HANDLER.equals(storageHandler) ||
+        TEMP_KUDU_STORAGE_HANDLER.equals(storageHandler);
   }
 
   /**
diff --git a/src/kudu/hms/hms_catalog.cc b/src/kudu/hms/hms_catalog.cc
index 6f03f5c..6785adc 100644
--- a/src/kudu/hms/hms_catalog.cc
+++ b/src/kudu/hms/hms_catalog.cc
@@ -195,8 +195,7 @@ Status HmsCatalog::DowngradeToLegacyImpalaTable(const string& name) {
   return ha_client_.Execute([&] (HmsClient* client) {
     hive::Table table;
     RETURN_NOT_OK(client->GetTable(hms_database.ToString(), hms_table.ToString(), &table));
-    if (table.parameters[HmsClient::kStorageHandlerKey] !=
-        HmsClient::kKuduStorageHandler) {
+    if (!hms::HmsClient::IsKuduTable(table)) {
       return Status::IllegalState("non-Kudu table cannot be downgraded");
     }
     // Downgrade the storage handler.
@@ -267,7 +266,7 @@ Status HmsCatalog::AlterTable(const string& id,
       RETURN_NOT_OK(client->GetTable(hms_database.ToString(), hms_table.ToString(), &table));
 
       // Check that the HMS entry belongs to the table being altered.
-      if (table.parameters[HmsClient::kStorageHandlerKey] != HmsClient::kKuduStorageHandler ||
+      if (!hms::HmsClient::IsKuduTable(table) ||
           (check_id && table.parameters[HmsClient::kKuduTableIdKey] != id)) {
         // The original table isn't a Kudu table, or isn't the same Kudu table.
         return Status::NotFound("the HMS entry for the table being "
diff --git a/src/kudu/hms/hms_client.cc b/src/kudu/hms/hms_client.cc
index 13e2237..b4a88d7 100644
--- a/src/kudu/hms/hms_client.cc
+++ b/src/kudu/hms/hms_client.cc
@@ -19,6 +19,7 @@
 
 #include <algorithm>
 #include <exception>
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
@@ -32,6 +33,7 @@
 #include <thrift/transport/TTransport.h>
 #include <thrift/transport/TTransportException.h>
 
+#include "kudu/gutil/map-util.h"
 #include "kudu/gutil/strings/split.h"
 #include "kudu/gutil/strings/strip.h"
 #include "kudu/gutil/strings/substitute.h"
@@ -100,7 +102,9 @@ const char* const HmsClient::kKuduTableNameKey = "kudu.table_name";
 const char* const HmsClient::kKuduMasterAddrsKey = "kudu.master_addresses";
 const char* const HmsClient::kKuduMasterEventKey = "kudu.master_event";
 const char* const HmsClient::kKuduCheckIdKey = "kudu.check_id";
-const char* const HmsClient::kKuduStorageHandler = "org.apache.kudu.hive.KuduStorageHandler";
+const char* const HmsClient::kKuduStorageHandler =
+    "org.apache.hadoop.hive.kudu.KuduStorageHandler";
+const char* const HmsClient::kOldKuduStorageHandler = "org.apache.kudu.hive.KuduStorageHandler";
 
 const char* const HmsClient::kTransactionalEventListeners =
   "hive.metastore.transactional.event.listeners";
@@ -376,5 +380,18 @@ Status HmsClient::DeserializeJsonTable(Slice json, hive::Table* table)  {
   return Status::OK();
 }
 
+bool HmsClient::IsKuduTable(const hive::Table& table) {
+  const string* storage_handler =
+      FindOrNull(table.parameters, hms::HmsClient::kStorageHandlerKey);
+  if (!storage_handler) {
+    return false;
+  }
+
+  // TODO(ghenke): Remove special kOldKuduStorageHandler handling after Impala integration
+  //  of the adjusted kKuduStorageHandler.
+  return *storage_handler == hms::HmsClient::kKuduStorageHandler ||
+         *storage_handler == hms::HmsClient::kOldKuduStorageHandler;
+}
+
 } // namespace hms
 } // namespace kudu
diff --git a/src/kudu/hms/hms_client.h b/src/kudu/hms/hms_client.h
index c79f401..9d10a6c 100644
--- a/src/kudu/hms/hms_client.h
+++ b/src/kudu/hms/hms_client.h
@@ -66,6 +66,8 @@ class HmsClient {
   static const char* const kKuduCheckIdKey;
   static const char* const kStorageHandlerKey;
   static const char* const kKuduStorageHandler;
+  // TODO(ghenke): Remove this after Impala integration of the adjusted kKuduStorageHandler.
+  static const char* const kOldKuduStorageHandler;
   static const char* const kHiveFilterFieldParams;
 
   static const char* const kTransactionalEventListeners;
@@ -180,6 +182,9 @@ class HmsClient {
                        const std::string& table_name,
                        std::vector<hive::Partition>* partitions) WARN_UNUSED_RESULT;
 
+  // Returns true if the HMS table is a Kudu table.
+  static bool IsKuduTable(const hive::Table& table) WARN_UNUSED_RESULT;
+
   // Deserializes a JSON encoded table.
   //
   // Notification event log messages often include table objects serialized as
diff --git a/src/kudu/master/hms_notification_log_listener.cc b/src/kudu/master/hms_notification_log_listener.cc
index 63a5eb5..5b19815 100644
--- a/src/kudu/master/hms_notification_log_listener.cc
+++ b/src/kudu/master/hms_notification_log_listener.cc
@@ -342,7 +342,8 @@ Status HmsNotificationLogListenerTask::HandleAlterTableEvent(const hive::Notific
 
   const string* storage_handler =
       FindOrNull(before_table.parameters, hms::HmsClient::kStorageHandlerKey);
-  if (!storage_handler || *storage_handler != hms::HmsClient::kKuduStorageHandler) {
+
+  if (!hms::HmsClient::IsKuduTable(before_table)) {
     // Not a Kudu table; skip it.
     VLOG(2) << Substitute("Ignoring alter event for non-Kudu table $0",
                           before_table.tableName);
@@ -400,8 +401,7 @@ Status HmsNotificationLogListenerTask::HandleDropTableEvent(const hive::Notifica
     return Status::OK();
   }
 
-  const string* storage_handler = FindOrNull(table.parameters, hms::HmsClient::kStorageHandlerKey);
-  if (!storage_handler || *storage_handler != hms::HmsClient::kKuduStorageHandler) {
+  if (!hms::HmsClient::IsKuduTable(table)) {
     // Not a Kudu table; skip it.
     VLOG(2) << Substitute("Ignoring drop event for non-Kudu table $0", table.tableName);
     return Status::OK();


[kudu] 01/02: [itbll] make error/failure easier to detect

Posted by gr...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

granthenke pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 4b60b5280ed2fc317b95a60f257053df27d70fa5
Author: Alexey Serbin <al...@apache.org>
AuthorDate: Mon Jun 10 09:29:51 2019 -0700

    [itbll] make error/failure easier to detect
    
    I found it extremely hard to parse a Spark job's output when running
    ITBLL to find error/failure messages originating from the ITBLL code.
    This patch changes the ITBLL output so that every failure message is
    simply prepended with a 'FAILURE: ' prefix.
    
    Change-Id: Ief639a1c075df4f7e6ea9253817345a023e37636
    Reviewed-on: http://gerrit.cloudera.org:8080/13575
    Reviewed-by: Andrew Wong <aw...@cloudera.com>
    Reviewed-by: Hao Hao <ha...@cloudera.com>
    Tested-by: Kudu Jenkins
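
[Editor's note] A usage note, not part of the patch: with the uniform prefix in place,
failure lines can be pulled out of a captured Spark driver log with a plain substring
match. A minimal Java sketch under that assumption (the log-file argument is hypothetical):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class FindItbllFailures {
  public static void main(String[] args) throws IOException {
    // args[0]: path to a captured Spark driver log.
    try (Stream<String> lines = Files.lines(Paths.get(args[0]))) {
      lines
          // Match anywhere in the line, since log4j formatting precedes the message.
          .filter(line -> line.contains("FAILURE: "))
          .forEach(System.out::println);
    }
  }
}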
---
 .../org/apache/kudu/spark/tools/IntegrationTestBigLinkedList.scala      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/java/kudu-spark-tools/src/main/scala/org/apache/kudu/spark/tools/IntegrationTestBigLinkedList.scala b/java/kudu-spark-tools/src/main/scala/org/apache/kudu/spark/tools/IntegrationTestBigLinkedList.scala
index b29e734..056bb19 100644
--- a/java/kudu-spark-tools/src/main/scala/org/apache/kudu/spark/tools/IntegrationTestBigLinkedList.scala
+++ b/java/kudu-spark-tools/src/main/scala/org/apache/kudu/spark/tools/IntegrationTestBigLinkedList.scala
@@ -83,7 +83,7 @@ object IntegrationTestBigLinkedList {
   }
 
   def fail(msg: String): Nothing = {
-    System.err.println(msg)
+    System.err.println(s"FAILURE: $msg")
     sys.exit(1)
   }