Posted to commits@kudu.apache.org by da...@apache.org on 2017/10/17 23:59:01 UTC

kudu git commit: KUDU-2191 (1/n): Hive Metastore Kudu plugin

Repository: kudu
Updated Branches:
  refs/heads/master 1c90b2f5c -> 57d5a3151


KUDU-2191 (1/n): Hive Metastore Kudu plugin

Adds a new plugin for the Hive Metastore whose purpose is to ensure
that Kudu table entries in the HMS are consistent. See the class
documentation for more specific information about which consistency
properties are checked and enforced.
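
For context, a consistent Kudu table entry as the plugin expects it carries
the Kudu storage handler plus a table ID and a master addresses property. A
minimal sketch using the HMS Thrift API is below; the class name, table name,
and master address are placeholders, while the property keys mirror the
constants defined in the plugin:

  import java.util.UUID;
  import org.apache.hadoop.hive.metastore.api.Table;
  import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

  public class KuduHmsEntrySketch {
    public static void main(String[] args) {
      // A Kudu table entry is identified by its storage handler and must
      // also carry the Kudu table ID and master addresses properties.
      Table table = new Table();
      table.setDbName("default");
      table.setTableName("example");                       // placeholder name
      table.putToParameters(hive_metastoreConstants.META_TABLE_STORAGE,
                            "org.apache.kudu.hive.KuduStorageHandler");
      table.putToParameters("kudu.table_id", UUID.randomUUID().toString());
      table.putToParameters("kudu.master_addresses", "master-1:7051"); // placeholder address
      System.out.println(table);
    }
  }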

The plugin will live in the Kudu repository for the foreseeable future,
since that makes it easier to test against Kudu. We should consider
upstreaming it into Apache Hive eventually.

Change-Id: Ief0d64f71f90e04588a05098a44658fd461b9ec8
Reviewed-on: http://gerrit.cloudera.org:8080/8121
Reviewed-by: Dan Burkert <da...@apache.org>
Tested-by: Kudu Jenkins


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/57d5a315
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/57d5a315
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/57d5a315

Branch: refs/heads/master
Commit: 57d5a31513aabd1b7f8b5e0b6d5c0aba408d0fc7
Parents: 1c90b2f
Author: Dan Burkert <da...@apache.org>
Authored: Wed Sep 20 17:27:55 2017 -0700
Committer: Dan Burkert <da...@apache.org>
Committed: Tue Oct 17 23:58:49 2017 +0000

----------------------------------------------------------------------
 java/gradle/dependencies.gradle                 |   3 +
 java/kudu-hive/build.gradle                     |  34 +++
 java/kudu-hive/pom.xml                          | 104 +++++++
 .../hive/metastore/KuduMetastorePlugin.java     | 190 +++++++++++++
 .../hive/metastore/TestKuduMetastorePlugin.java | 273 +++++++++++++++++++
 java/kudu-hive/src/test/resources/hive-site.xml |  45 +++
 .../src/test/resources/log4j.properties         |  22 ++
 java/pom.xml                                    |  14 +-
 java/settings.gradle                            |   1 +
 9 files changed, 680 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/gradle/dependencies.gradle
----------------------------------------------------------------------
diff --git a/java/gradle/dependencies.gradle b/java/gradle/dependencies.gradle
index 3487c99..0a296d7 100755
--- a/java/gradle/dependencies.gradle
+++ b/java/gradle/dependencies.gradle
@@ -36,6 +36,7 @@ versions += [
     guava          : "20.0",
     hadoop         : "2.8.1",
     hamcrest       : "1.3",
+    hive           : "2.3.0",
     jepsen         : "0.1.5",
     jsr305         : "3.0.2",
     junit          : "4.12",
@@ -76,6 +77,8 @@ libs += [
     guava             : "com.google.guava:guava:$versions.guava",
     hadoopClient      : "org.apache.hadoop:hadoop-client:$versions.hadoop",
     hamcrestCore      : "org.hamcrest:hamcrest-core:$versions.hamcrest",
+    hiveMetastore     : "org.apache.hive:hive-metastore:$versions.hive",
+    hiveMetastoreTest : "org.apache.hive:hive-metastore:$versions.hive:tests",
     jepsen            : "jepsen:jepsen:$versions.jepsen",
     jsr305            : "com.google.code.findbugs:jsr305:$versions.jsr305",
     junit             : "junit:junit:$versions.junit",

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/kudu-hive/build.gradle
----------------------------------------------------------------------
diff --git a/java/kudu-hive/build.gradle b/java/kudu-hive/build.gradle
new file mode 100644
index 0000000..6d9d03d
--- /dev/null
+++ b/java/kudu-hive/build.gradle
@@ -0,0 +1,34 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+dependencies {
+  compile libs.yetusAnnotations
+  compile libs.slf4jApi
+  compile libs.hiveMetastore
+
+  testCompile libs.hiveMetastoreTest
+  testCompile libs.junit
+  testCompile libs.log4j
+  testCompile libs.slf4jLog4j12
+}
+
+test {
+  // This property is substituted into hive-site.xml.
+  systemProperty "testdata.dir", temporaryDir
+  // Configure the derby log file location.
+  systemProperty "derby.stream.error.file", "$temporaryDir/derby.log"
+}

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/kudu-hive/pom.xml
----------------------------------------------------------------------
diff --git a/java/kudu-hive/pom.xml b/java/kudu-hive/pom.xml
new file mode 100644
index 0000000..fa67977
--- /dev/null
+++ b/java/kudu-hive/pom.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.kudu</groupId>
+        <artifactId>kudu-parent</artifactId>
+        <version>1.6.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>kudu-hive</artifactId>
+    <name>Kudu Hive</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-metastore</artifactId>
+            <version>${hive.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-metastore</artifactId>
+            <version>${hive.version}</version>
+            <classifier>tests</classifier>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.yetus</groupId>
+            <artifactId>audience-annotations</artifactId>
+            <version>${yetus.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>${slf4j.version}</version>
+        </dependency>
+
+        <!-- Transitively required through hive-metastore, which has marked it
+             as an optional dependency. -->
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>${hadoop.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+            <version>${log4j.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>${junit.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+            <version>${slf4j.version}</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <configuration>
+                    <systemPropertyVariables>
+                        <!-- This property is substituted into hive-site.xml. -->
+                        <testdata.dir>${testdata.dir}</testdata.dir>
+                        <!-- Configure the derby log file location. -->
+                        <derby.stream.error.file>${testdata.dir}/derby.log</derby.stream.error.file>
+                    </systemPropertyVariables>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java
----------------------------------------------------------------------
diff --git a/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java b/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java
new file mode 100644
index 0000000..4991f10
--- /dev/null
+++ b/java/kudu-hive/src/main/java/org/apache/kudu/hive/metastore/KuduMetastorePlugin.java
@@ -0,0 +1,190 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.kudu.hive.metastore;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+
+/**
+ * The {@code KuduMetastorePlugin} intercepts DDL operations on Kudu table entries
+ * in the HMS, and validates that they are consistent. It's meant to be run as a
+ * transactional event listener, which requires it being on the classpath of the
+ * Hive Metastore, and the following configuration in {@code hive-site.xml}:
+ *
+ * <pre>
+ * {@code
+ *  <property>
+ *    <name>hive.metastore.transactional.event.listeners</name>
+ *    <value>org.apache.kudu.hive.metastore.KuduMetastorePlugin</value>
+ *  </property>
+ * }
+ * </pre>
+ *
+ * The plugin enforces that Kudu table entries in the HMS always
+ * contain two properties: a Kudu table ID and the Kudu master addresses. It also
+ * enforces that non-Kudu tables do not have these properties. The plugin
+ * considers entries to be Kudu tables if they contain the Kudu storage handler.
+ *
+ * Additionally, the plugin checks that when particular events have an
+ * environment containing a Kudu table ID, that event only applies
+ * to the specified Kudu table. This provides some amount of concurrency safety,
+ * so that the Kudu Master can ensure it is operating on the correct table entry.
+ */
+public class KuduMetastorePlugin extends MetaStoreEventListener {
+
+  @VisibleForTesting
+  static final String KUDU_STORAGE_HANDLER = "org.apache.kudu.hive.KuduStorageHandler";
+  @VisibleForTesting
+  static final String KUDU_TABLE_ID_KEY = "kudu.table_id";
+  @VisibleForTesting
+  static final String KUDU_MASTER_ADDRS_KEY = "kudu.master_addresses";
+
+  public KuduMetastorePlugin(Configuration config) {
+    super(config);
+  }
+
+  @Override
+  public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
+    super.onCreateTable(tableEvent);
+    Table table = tableEvent.getTable();
+
+    // Allow non-Kudu tables to be created.
+    if (!isKuduTable(table)) {
+      // But ensure that the new table does not contain Kudu-specific properties.
+      checkNoKuduProperties(table);
+      return;
+    }
+
+    checkKuduProperties(table);
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
+    super.onDropTable(tableEvent);
+
+    EnvironmentContext environmentContext = tableEvent.getEnvironmentContext();
+    String targetTableId = environmentContext == null ? null :
+        environmentContext.getProperties().get(KUDU_TABLE_ID_KEY);
+
+    // If this request doesn't specify a Kudu table ID then allow it to proceed.
+    if (targetTableId == null) {
+      return;
+    }
+
+    Table table = tableEvent.getTable();
+
+    // Check that the table being dropped is a Kudu table.
+    if (!isKuduTable(table)) {
+      throw new MetaException("Kudu table ID does not match the non-Kudu HMS entry");
+    }
+
+    // Check that the table's ID matches the request's table ID.
+    if (!targetTableId.equals(table.getParameters().get(KUDU_TABLE_ID_KEY))) {
+      throw new MetaException("Kudu table ID does not match the HMS entry");
+    }
+  }
+
+  @Override
+  public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
+    super.onAlterTable(tableEvent);
+
+    Table oldTable = tableEvent.getOldTable();
+    Table newTable = tableEvent.getNewTable();
+
+    // Allow non-Kudu tables to be altered.
+    if (!isKuduTable(oldTable)) {
+      // But ensure that the alteration isn't introducing Kudu-specific properties.
+      checkNoKuduProperties(newTable);
+      return;
+    }
+
+    // Check the altered table's properties.
+    checkKuduProperties(newTable);
+
+    // Check that the table ID isn't changing.
+    String oldTableId = oldTable.getParameters().get(KUDU_TABLE_ID_KEY);
+    String newTableId = newTable.getParameters().get(KUDU_TABLE_ID_KEY);
+    if (!newTableId.equals(oldTableId)) {
+      throw new MetaException("Kudu table ID does not match the existing HMS entry");
+    }
+  }
+
+  /**
+   * Checks whether the table is a Kudu table.
+   * @param table the table to check
+   * @return {@code true} if the table is a Kudu table, otherwise {@code false}
+   */
+  private boolean isKuduTable(Table table) {
+    return KUDU_STORAGE_HANDLER.equals(table.getParameters()
+                                            .get(hive_metastoreConstants.META_TABLE_STORAGE));
+  }
+
+  /**
+   * Checks that the Kudu table entry contains the required Kudu table properties.
+   * @param table the table to check
+   */
+  private void checkKuduProperties(Table table) throws MetaException {
+    if (!isKuduTable(table)) {
+      throw new MetaException(String.format(
+          "Kudu table entry must contain a Kudu storage handler property (%s=%s)",
+          hive_metastoreConstants.META_TABLE_STORAGE,
+          KUDU_STORAGE_HANDLER));
+    }
+    String tableId = table.getParameters().get(KUDU_TABLE_ID_KEY);
+    if (tableId == null || tableId.isEmpty()) {
+      throw new MetaException(String.format(
+          "Kudu table entry must contain a table ID property (%s)", KUDU_TABLE_ID_KEY));
+    }
+    String masterAddresses = table.getParameters().get(KUDU_MASTER_ADDRS_KEY);
+    if (masterAddresses == null || masterAddresses.isEmpty()) {
+      throw new MetaException(String.format(
+          "Kudu table entry must contain a Master addresses property (%s)", KUDU_MASTER_ADDRS_KEY));
+    }
+  }
+
+  /**
+   * Checks that the non-Kudu table entry does not contain Kudu-specific table properties.
+   * @param table the table to check
+   */
+  private void checkNoKuduProperties(Table table) throws MetaException {
+    if (isKuduTable(table)) {
+      throw new MetaException(String.format(
+          "non-Kudu table entry must not contain the Kudu storage handler (%s=%s)",
+          hive_metastoreConstants.META_TABLE_STORAGE,
+          KUDU_STORAGE_HANDLER));
+    }
+    if (table.getParameters().containsKey(KUDU_TABLE_ID_KEY)) {
+      throw new MetaException(String.format(
+          "non-Kudu table entry must not contain a table ID property (%s)",
+          KUDU_TABLE_ID_KEY));
+    }
+    if (table.getParameters().containsKey(KUDU_MASTER_ADDRS_KEY)) {
+      throw new MetaException(String.format(
+          "non-Kudu table entry must not contain a Master addresses property (%s)",
+          KUDU_MASTER_ADDRS_KEY));
+    }
+  }
+}
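
The drop-table path above is the concurrency-safety hook: a caller such as
the Kudu master can fence a DROP TABLE against a specific table by passing
the expected Kudu table ID in the event's EnvironmentContext, and onDropTable()
rejects the drop if the HMS entry's ID differs. A hedged sketch of that call
pattern against a HiveMetaStoreClient follows; the metastore URI, database,
table name, and table ID are assumptions, and the test below exercises the
same path:

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

  public class FencedDropSketch {
    public static void main(String[] args) throws Exception {
      HiveConf conf = new HiveConf();
      conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083"); // assumed URI
      HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
      try {
        // Pass the Kudu table ID the caller believes the HMS entry should
        // have; the plugin compares it against the entry's kudu.table_id
        // and throws a MetaException on mismatch.
        EnvironmentContext ctx = new EnvironmentContext();
        ctx.putToProperties("kudu.table_id", "expected-table-id"); // placeholder ID
        client.dropTable("default", "example",
                         /* delete data */ true,
                         /* ignore unknown */ false,
                         ctx);
      } finally {
        client.close();
      }
    }
  }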

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/kudu-hive/src/test/java/org/apache/kudu/hive/metastore/TestKuduMetastorePlugin.java
----------------------------------------------------------------------
diff --git a/java/kudu-hive/src/test/java/org/apache/kudu/hive/metastore/TestKuduMetastorePlugin.java b/java/kudu-hive/src/test/java/org/apache/kudu/hive/metastore/TestKuduMetastorePlugin.java
new file mode 100644
index 0000000..8eb6927
--- /dev/null
+++ b/java/kudu-hive/src/test/java/org/apache/kudu/hive/metastore/TestKuduMetastorePlugin.java
@@ -0,0 +1,273 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.kudu.hive.metastore;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.UUID;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.MockPartitionExpressionForMetastore;
+import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.thrift.TException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestKuduMetastorePlugin {
+  private static HiveConf clientConf;
+  private HiveMetaStoreClient client;
+
+  @BeforeClass
+  public static void startMetaStoreServer() throws Exception {
+    HiveConf metastoreConf = new HiveConf();
+    // Avoids a dependency on the default partition expression class, which is
+    // contained in the hive-exec jar.
+    metastoreConf.setClass(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname,
+                           MockPartitionExpressionForMetastore.class,
+                           PartitionExpressionProxy.class);
+    int msPort = MetaStoreUtils.startMetaStore(metastoreConf);
+
+    clientConf = new HiveConf();
+    clientConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
+  }
+
+  @Before
+  public void createClient() throws Exception {
+    client = new HiveMetaStoreClient(clientConf);
+  }
+
+  @After
+  public void closeClient() {
+    if (client != null) {
+      client.close();
+    }
+  }
+
+  /**
+   * @return a valid Kudu table descriptor.
+   */
+  private static Table newTable(String name) {
+    Table table = new Table();
+    table.setDbName("default");
+    table.setTableName(name);
+    table.setTableType("MANAGED_TABLE");
+    table.putToParameters(hive_metastoreConstants.META_TABLE_STORAGE,
+                          KuduMetastorePlugin.KUDU_STORAGE_HANDLER);
+    table.putToParameters(KuduMetastorePlugin.KUDU_TABLE_ID_KEY,
+                          UUID.randomUUID().toString());
+    table.putToParameters(KuduMetastorePlugin.KUDU_MASTER_ADDRS_KEY,
+                          "localhost");
+
+    // The HMS will NPE if the storage descriptor and partition keys aren't set...
+    StorageDescriptor sd = new StorageDescriptor();
+    sd.addToCols(new FieldSchema("a", "bigint", ""));
+    sd.setSerdeInfo(new SerDeInfo());
+    sd.setLocation(String.format("%s/%s/%s",
+                                 clientConf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname),
+                                 table.getDbName(), table.getTableName()));
+    table.setSd(sd);
+    table.setPartitionKeys(Lists.<FieldSchema>newArrayList());
+
+    return table;
+  }
+
+  @Test
+  public void testCreateTableHandler() throws Exception {
+
+    // A non-Kudu table with a Kudu table ID should be rejected.
+    try {
+      Table table = newTable("table");
+      table.getParameters().remove(hive_metastoreConstants.META_TABLE_STORAGE);
+      table.getParameters().remove(KuduMetastorePlugin.KUDU_MASTER_ADDRS_KEY);
+      client.createTable(table);
+      fail();
+    } catch (TException e) {
+      assertTrue(e.getMessage().contains(
+          "non-Kudu table entry must not contain a table ID property"));
+    }
+
+    // A non-Kudu table with a Kudu master address should be rejected.
+    try {
+      Table table = newTable("table");
+      table.getParameters().remove(hive_metastoreConstants.META_TABLE_STORAGE);
+      table.getParameters().remove(KuduMetastorePlugin.KUDU_TABLE_ID_KEY);
+      client.createTable(table);
+      fail();
+    } catch (TException e) {
+      assertTrue(e.getMessage().contains(
+          "non-Kudu table entry must not contain a Master addresses property"));
+    }
+
+    // A Kudu table without a Kudu table ID.
+    try {
+      Table table = newTable("table");
+      table.getParameters().remove(KuduMetastorePlugin.KUDU_TABLE_ID_KEY);
+      client.createTable(table);
+      fail();
+    } catch (TException e) {
+      assertTrue(e.getMessage().contains("Kudu table entry must contain a table ID property"));
+    }
+
+    // A Kudu table without a master address
+    try {
+      Table table = newTable("table");
+      table.getParameters().remove(KuduMetastorePlugin.KUDU_MASTER_ADDRS_KEY);
+      client.createTable(table);
+      fail();
+    } catch (TException e) {
+      assertTrue(e.getMessage().contains(
+          "Kudu table entry must contain a Master addresses property"));
+    }
+
+    // Check that creating a valid table is accepted.
+    Table table = newTable("table");
+    client.createTable(table);
+    client.dropTable(table.getDbName(), table.getTableName());
+  }
+
+  @Test
+  public void testAlterTableHandler() throws Exception {
+    // Test altering a Kudu table.
+    Table table = newTable("table");
+    client.createTable(table);
+    try {
+
+      // Try to alter the Kudu table with a different table ID.
+      try {
+        client.alter_table(table.getDbName(), table.getTableName(), newTable(table.getTableName()));
+        fail();
+      } catch (TException e) {
+        assertTrue(e.getMessage().contains("Kudu table ID does not match the existing HMS entry"));
+      }
+
+      // Try to alter the Kudu table with no storage handler.
+      try {
+        Table alteredTable = table.deepCopy();
+        alteredTable.getParameters().remove(hive_metastoreConstants.META_TABLE_STORAGE);
+        client.alter_table(table.getDbName(), table.getTableName(), alteredTable);
+        fail();
+      } catch (TException e) {
+        assertTrue(e.getMessage().contains(
+            "Kudu table entry must contain a Kudu storage handler property"));
+      }
+
+      // Check that altering the table succeeds.
+      client.alter_table(table.getDbName(), table.getTableName(), table);
+    } finally {
+      client.dropTable(table.getDbName(), table.getTableName());
+    }
+
+    // Test altering a non-Kudu table.
+    table.getParameters().clear();
+    client.createTable(table);
+    try {
+
+      // Try to alter the table and add a Kudu table ID.
+      try {
+        Table alteredTable = table.deepCopy();
+        alteredTable.putToParameters(KuduMetastorePlugin.KUDU_TABLE_ID_KEY,
+                                     UUID.randomUUID().toString());
+        client.alter_table(table.getDbName(), table.getTableName(), alteredTable);
+        fail();
+      } catch (TException e) {
+        assertTrue(e.getMessage().contains(
+            "non-Kudu table entry must not contain a table ID property"));
+      }
+
+      // Try to alter the table and set a Kudu storage handler.
+      try {
+        Table alteredTable = table.deepCopy();
+        alteredTable.putToParameters(hive_metastoreConstants.META_TABLE_STORAGE,
+                                     KuduMetastorePlugin.KUDU_STORAGE_HANDLER);
+        client.alter_table(table.getDbName(), table.getTableName(), alteredTable);
+        fail();
+      } catch (TException e) {
+        assertTrue(e.getMessage().contains(
+            "non-Kudu table entry must not contain the Kudu storage handler"));
+      }
+
+      // Check that altering the table succeeds.
+      client.alter_table(table.getDbName(), table.getTableName(), table);
+    } finally {
+      client.dropTable(table.getDbName(), table.getTableName());
+    }
+  }
+
+  @Test
+  public void testDropTableHandler() throws Exception {
+    // Test dropping a Kudu table.
+    Table table = newTable("table");
+    client.createTable(table);
+    try {
+
+      // Test with an invalid table ID.
+      try {
+        EnvironmentContext envContext = new EnvironmentContext();
+        envContext.putToProperties(KuduMetastorePlugin.KUDU_TABLE_ID_KEY,
+                                   UUID.randomUUID().toString());
+        client.dropTable(table.getDbName(), table.getTableName(),
+                         /* delete data */ true,
+                         /* ignore unknown */ false,
+                         envContext);
+        fail();
+      } catch (TException e) {
+        assertTrue(e.getMessage().contains("Kudu table ID does not match the HMS entry"));
+      }
+    } finally {
+      // Dropping a Kudu table without context should succeed.
+      client.dropTable(table.getDbName(), table.getTableName());
+    }
+
+    // Test dropping a Kudu table with the correct ID.
+    client.createTable(table);
+    EnvironmentContext envContext = new EnvironmentContext();
+    envContext.putToProperties(KuduMetastorePlugin.KUDU_TABLE_ID_KEY,
+                               table.getParameters().get(KuduMetastorePlugin.KUDU_TABLE_ID_KEY));
+    client.dropTable(table.getDbName(), table.getTableName(),
+                     /* delete data */ true,
+                     /* ignore unknown */ false,
+                     envContext);
+
+    // Test dropping a non-Kudu table.
+    table.getParameters().clear();
+    client.createTable(table);
+    try {
+      client.dropTable(table.getDbName(), table.getTableName(),
+                       /* delete data */ true,
+                       /* ignore unknown */ false,
+                       envContext);
+      fail();
+    } catch (TException e) {
+      assertTrue(e.getMessage().contains("Kudu table ID does not match the non-Kudu HMS entry"));
+    } finally {
+      client.dropTable(table.getDbName(), table.getTableName());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/kudu-hive/src/test/resources/hive-site.xml
----------------------------------------------------------------------
diff --git a/java/kudu-hive/src/test/resources/hive-site.xml b/java/kudu-hive/src/test/resources/hive-site.xml
new file mode 100644
index 0000000..ff09ccd
--- /dev/null
+++ b/java/kudu-hive/src/test/resources/hive-site.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <property>
+    <name>hive.metastore.transactional.event.listeners</name>
+    <value>org.apache.kudu.hive.metastore.KuduMetastorePlugin</value>
+  </property>
+
+  <property>
+    <name>datanucleus.schema.autoCreateAll</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.metastore.schema.verification</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>file://${testdata.dir}/warehouse</value>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:derby:memory:${testdata.dir}/metadb;create=true</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/kudu-hive/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/java/kudu-hive/src/test/resources/log4j.properties b/java/kudu-hive/src/test/resources/log4j.properties
new file mode 100644
index 0000000..79573b8
--- /dev/null
+++ b/java/kudu-hive/src/test/resources/log4j.properties
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+log4j.rootLogger = DEBUG, out
+
+log4j.appender.out = org.apache.log4j.ConsoleAppender
+log4j.appender.out.layout = org.apache.log4j.PatternLayout
+log4j.appender.out.layout.ConversionPattern = %d{HH:mm:ss.SSS} [%p - %t] (%F:%L) %m%n

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/pom.xml
----------------------------------------------------------------------
diff --git a/java/pom.xml b/java/pom.xml
index e341456..d3df466 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -65,16 +65,17 @@
         <scala-maven-plugin.version>3.2.2</scala-maven-plugin.version>
 
         <!-- Library dependencies -->
-        <jepsen.version>0.1.5</jepsen.version>
         <async.version>1.4.1</async.version>
         <avro.version>1.8.2</avro.version>
-        <clojure.version>1.8.0</clojure.version>
         <clojure.tools.version>0.3.5</clojure.tools.version>
+        <clojure.version>1.8.0</clojure.version>
         <commons-io.version>2.5</commons-io.version>
         <flume.version>1.7.0</flume.version>
         <guava.version>20.0</guava.version>
         <hadoop.version>2.8.1</hadoop.version>
         <hamcrest-core.version>1.3</hamcrest-core.version>
+        <hive.version>2.3.0</hive.version>
+        <jepsen.version>0.1.5</jepsen.version>
         <jsr305.version>3.0.2</jsr305.version>
         <junit.version>4.12</junit.version>
         <log4j.version>1.2.17</log4j.version>
@@ -95,7 +96,7 @@
         <spark.version.label>spark2</spark.version.label>
 
         <!-- Misc variables -->
-        <testdata.dir>target/testdata</testdata.dir>
+        <testdata.dir>${project.build.directory}/testdata</testdata.dir>
         <testArgLine>-enableassertions -Xmx1900m
             -Djava.security.egd=file:/dev/urandom -Djava.net.preferIPv4Stack=true
             -Djava.awt.headless=true
@@ -103,12 +104,13 @@
     </properties>
 
     <modules>
-        <module>kudu-client</module>
         <module>kudu-client-tools</module>
-        <module>kudu-mapreduce</module>
+        <module>kudu-client</module>
         <module>kudu-flume-sink</module>
-        <module>kudu-spark</module>
+        <module>kudu-hive</module>
+        <module>kudu-mapreduce</module>
         <module>kudu-spark-tools</module>
+        <module>kudu-spark</module>
     </modules>
 
     <build>

http://git-wip-us.apache.org/repos/asf/kudu/blob/57d5a315/java/settings.gradle
----------------------------------------------------------------------
diff --git a/java/settings.gradle b/java/settings.gradle
index 430e884..74367ff 100644
--- a/java/settings.gradle
+++ b/java/settings.gradle
@@ -22,6 +22,7 @@ rootProject.name = "kudu-parent"
 include "kudu-client"
 include "kudu-client-tools"
 include "kudu-flume-sink"
+include "kudu-hive"
 include "kudu-jepsen"
 include "kudu-mapreduce"
 include "kudu-spark"