Posted to commits@hive.apache.org by an...@apache.org on 2020/08/26 08:30:59 UTC

[hive] branch master updated: HIVE-24067: TestReplicationScenariosExclusiveReplica - Wrong FS error during DB drop (Pravin Kumar Sinha, reviewed by Aasha Medhi)

This is an automated email from the ASF dual-hosted git repository.

anishek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 18893d7  HIVE-24067: TestReplicationScenariosExclusiveReplica - Wrong FS error during DB drop (Pravin Kumar Sinha, reviewed by Aasha Medhi)
18893d7 is described below

commit 18893d7e0bbde4e596c00e48560ddc15da532076
Author: Anishek Agarwal <an...@gmail.com>
AuthorDate: Wed Aug 26 14:00:48 2020 +0530

    HIVE-24067: TestReplicationScenariosExclusiveReplica - Wrong FS error during DB drop (Pravin Kumar Sinha, reviewed by Aasha Medhi)
---
 .../ql/parse/BaseReplicationAcrossInstances.java   | 34 +++++++-------
 .../TestReplicationScenariosExclusiveReplica.java  | 54 ++++++++++++++++++++++
 2 files changed, 71 insertions(+), 17 deletions(-)
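
For context, the "Wrong FS" failure named in the subject is Hadoop's client-side path check: FileSystem.checkPath() rejects a fully qualified Path whose scheme and authority do not match the filesystem it is resolved against. A minimal sketch of the error shape (not part of the patch; host names and ports are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WrongFsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://primary-nn:8020");  // hypothetical primary cluster
        FileSystem primaryFs = FileSystem.get(conf);

        // Fully qualified path on a different (replica) cluster:
        Path replicaBase = new Path("hdfs://replica-nn:8020/replica/external/base");

        // Fails client-side with IllegalArgumentException:
        //   Wrong FS: hdfs://replica-nn:8020/replica/external/base,
        //   expected: hdfs://primary-nn:8020
        primaryFs.exists(replicaBase);
      }
    }

The patch below sidesteps this in the test setup by computing the replica's external table base once, fully qualified against the replica FileSystem, and putting it explicitly into each warehouse's config overrides rather than threading it through a shared, reused overrides map.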

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
index 5fb44dd..476a940 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
@@ -63,7 +63,8 @@ public class BaseReplicationAcrossInstances {
       put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
     }};
     localOverrides.putAll(overrides);
-    setReplicaExternalBase(miniDFSCluster.getFileSystem(), localOverrides);
+    setFullyQualifiedReplicaExternalTableBase(miniDFSCluster.getFileSystem());
+    localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
     primary = new WarehouseInstance(LOG, miniDFSCluster, localOverrides);
     localOverrides.put(MetastoreConf.ConfVars.REPLDIR.getHiveName(), primary.repldDir);
     replica = new WarehouseInstance(LOG, miniDFSCluster, localOverrides);
@@ -73,9 +74,7 @@ public class BaseReplicationAcrossInstances {
   static void internalBeforeClassSetupExclusiveReplica(Map<String, String> primaryOverrides,
                                                        Map<String, String> replicaOverrides, Class clazz)
           throws Exception {
-    /**
-     * Setup replica cluster.
-     */
+    // Setup replica HDFS.
     String replicaBaseDir = Files.createTempDirectory("replica").toFile().getAbsolutePath();
     replicaConf = new HiveConf(clazz);
     replicaConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, replicaBaseDir);
@@ -84,28 +83,30 @@ public class BaseReplicationAcrossInstances {
     MiniDFSCluster miniReplicaDFSCluster =
             new MiniDFSCluster.Builder(replicaConf).numDataNodes(1).format(true).build();
 
-    Map<String, String> localOverrides = new HashMap<>();
-    localOverrides.put("fs.defaultFS", miniReplicaDFSCluster.getFileSystem().getUri().toString());
-    localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
-    localOverrides.putAll(replicaOverrides);
-    setReplicaExternalBase(miniReplicaDFSCluster.getFileSystem(), localOverrides);
-    replica = new WarehouseInstance(LOG, miniReplicaDFSCluster, localOverrides);
-
-    /**
-     * Setup primary cluster.
-     */
+    // Setup primary HDFS.
     String primaryBaseDir = Files.createTempDirectory("base").toFile().getAbsolutePath();
     conf = new HiveConf(clazz);
     conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, primaryBaseDir);
     conf.set("dfs.client.use.datanode.hostname", "true");
     conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
     MiniDFSCluster miniPrimaryDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
-    localOverrides.clear();
+
+    // Setup primary warehouse.
+    setFullyQualifiedReplicaExternalTableBase(miniReplicaDFSCluster.getFileSystem());
+    Map<String, String> localOverrides = new HashMap<>();
     localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
     localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
     localOverrides.put("fs.defaultFS", miniPrimaryDFSCluster.getFileSystem().getUri().toString());
     localOverrides.putAll(primaryOverrides);
     primary = new WarehouseInstance(LOG, miniPrimaryDFSCluster, localOverrides);
+
+    // Setup replica warehouse.
+    localOverrides.clear();
+    localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
+    localOverrides.put("fs.defaultFS", miniReplicaDFSCluster.getFileSystem().getUri().toString());
+    localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
+    localOverrides.putAll(replicaOverrides);
+    replica = new WarehouseInstance(LOG, miniReplicaDFSCluster, localOverrides);
   }
 
   @AfterClass
@@ -114,10 +115,9 @@ public class BaseReplicationAcrossInstances {
     replica.close();
   }
 
-  private static void setReplicaExternalBase(FileSystem fs, Map<String, String> confMap) throws IOException {
+  private static void setFullyQualifiedReplicaExternalTableBase(FileSystem fs) throws IOException {
     fs.mkdirs(REPLICA_EXTERNAL_BASE);
     fullyQualifiedReplicaExternalBase =  fs.getFileStatus(REPLICA_EXTERNAL_BASE).getPath().toString();
-    confMap.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
   }
 
   @Before
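
A note on the renamed helper: FileStatus paths returned by HDFS come back fully qualified with the filesystem's scheme and authority, which is how fullyQualifiedReplicaExternalBase picks up the replica's hdfs:// prefix. A sketch of the idiom (the URI shown is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class QualifiedBase {
      static Path qualifiedBase(FileSystem fs, Path base) throws IOException {
        fs.mkdirs(base);
        // getFileStatus(...).getPath() returns the path fully qualified,
        // e.g. hdfs://replica-nn:8020/replica/external/base.
        return fs.getFileStatus(base).getPath();
        // fs.makeQualified(base) yields the same qualification without the extra RPC.
      }
    }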
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
index 7e6b5e2..549447e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -24,6 +25,7 @@ import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncod
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -175,6 +177,58 @@ public class TestReplicationScenariosExclusiveReplica extends BaseReplicationAcr
             .verifyResult("800");
   }
 
+  @Test
+  public void externalTableReplicationDropDatabase() throws Throwable {
+    String primaryDb = "primarydb1";
+    String replicaDb = "repldb1";
+    String tableName = "t1";
+    List<String> withClauseOptions = getStagingLocationConfig(primary.repldDir);
+    WarehouseInstance.Tuple tuple = primary
+            .run("create database " + primaryDb)
+            .run("alter database "+ primaryDb + " set dbproperties('repl.source.for'='1,2,3')")
+            .run("use " + primaryDb)
+            .run("create external table " +  tableName + " (id int)")
+            .run("insert into table " + tableName + " values (500)")
+            .dump(primaryDb, withClauseOptions);
+
+    replica.load(replicaDb, primaryDb, withClauseOptions)
+            .run("use " + replicaDb)
+            .run("show tables like '" + tableName + "'")
+            .verifyResult(tableName)
+            .run("select id from " + tableName)
+            .verifyResult("500");
+
+    Path dbDataLocPrimary = new Path(primary.externalTableWarehouseRoot, primaryDb + ".db");
+    Path extTableBase = new Path(replica.getConf().get(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname));
+    Path dbDataLocReplica = new Path(extTableBase + dbDataLocPrimary.toUri().getPath());
+    verifyTableDataExists(primary, dbDataLocPrimary, tableName, true);
+    verifyTableDataExists(replica, dbDataLocReplica, tableName, true);
+
+    primary.run("show databases like '" + primaryDb + "'")
+            .verifyResult(primaryDb);
+    replica.run("show databases like '" + replicaDb + "'")
+            .verifyResult(replicaDb);
+    primary.run("drop database " + primaryDb + " cascade");
+    replica.run("drop database " + replicaDb + " cascade");
+    primary.run("show databases like '" + primaryDb + "'")
+            .verifyResult(null);
+    replica.run("show databases like '" + replicaDb + "'")
+            .verifyResult(null);
+
+    verifyTableDataExists(primary, dbDataLocPrimary, tableName, false);
+    verifyTableDataExists(replica, dbDataLocReplica, tableName, true);
+  }
+
+  private void verifyTableDataExists(WarehouseInstance warehouse, Path dbDataPath, String tableName,
+                                     boolean shouldExists) throws IOException {
+    FileSystem fileSystem = FileSystem.get(warehouse.warehouseRoot.toUri(), warehouse.getConf());
+    Path tablePath = new Path(dbDataPath, tableName);
+    Path dataFilePath = new Path(tablePath, "000000_0");
+    Assert.assertEquals(shouldExists, fileSystem.exists(dbDataPath));
+    Assert.assertEquals(shouldExists, fileSystem.exists(tablePath));
+    Assert.assertEquals(shouldExists, fileSystem.exists(dataFilePath));
+  }
+
   private List<String> getStagingLocationConfig(String stagingLoc) {
     List<String> confList = new ArrayList<>();
     confList.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + stagingLoc + "'");