Posted to commits@hive.apache.org by se...@apache.org on 2018/07/02 02:08:03 UTC
[15/20] hive git commit: HIVE-19970: Replication dump has an NPE when table is empty (Mahesh Kumar Behera, reviewed by Peter Vary, Sankar Hariappan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1c33fea8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1c33fea8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1c33fea8
Branch: refs/heads/master-txnstats
Commit: 1c33fea890bc01a85eb336caf5d73a85652f91a3
Parents: 80c3bb5
Author: Sankar Hariappan <sa...@apache.org>
Authored: Sun Jul 1 09:48:09 2018 +0530
Committer: Sankar Hariappan <sa...@apache.org>
Committed: Sun Jul 1 10:01:32 2018 +0530
----------------------------------------------------------------------
.../hive/ql/parse/TestReplicationScenarios.java | 42 ++++++++++++++++
.../TestReplicationScenariosAcidTables.java | 52 +++++++++++++++++++-
...TestReplicationScenariosAcrossInstances.java | 42 +++-------------
.../hadoop/hive/ql/parse/WarehouseInstance.java | 7 ++-
.../org/apache/hadoop/hive/ql/ErrorMsg.java | 3 +-
.../hadoop/hive/ql/exec/repl/ReplDumpTask.java | 4 ++
.../exec/repl/bootstrap/load/LoadDatabase.java | 2 +-
.../hive/ql/exec/repl/util/ReplUtils.java | 6 +--
.../ql/parse/repl/dump/PartitionExport.java | 24 ++++++---
.../hadoop/hive/ql/parse/repl/dump/Utils.java | 8 ++-
.../ql/parse/repl/dump/io/FileOperations.java | 7 +++
.../hadoop/hive/metastore/HiveAlterHandler.java | 7 +--
12 files changed, 149 insertions(+), 55 deletions(-)
----------------------------------------------------------------------
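In short: before this patch, REPL DUMP could hit a NullPointerException when a table or partition directory had been removed from the warehouse; with it, the dump fails cleanly with ErrorMsg.FILE_NOT_FOUND, and failures inside the partition-export worker threads are propagated instead of being lost. A minimal sketch of the behavior the new tests below assert (driver, fs, tableDir, and dbName are test fixtures here, not part of the patch):

    // Simulate a table whose directory has vanished from HDFS, then dump.
    fs.delete(tableDir, true);
    CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName);
    // The dump now returns the canonical FILE_NOT_FOUND code instead of raising an NPE.
    assertEquals(ErrorMsg.FILE_NOT_FOUND.getErrorCode(), ret.getResponseCode());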
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 689c859..46c623d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -91,6 +91,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import org.junit.Assert;
public class TestReplicationScenarios {
@@ -3186,6 +3187,47 @@ public class TestReplicationScenarios {
}
@Test
+ public void testDumpWithTableDirMissing() throws IOException {
+ String dbName = createDB(testName.getMethodName(), driver);
+ run("CREATE TABLE " + dbName + ".normal(a int)", driver);
+ run("INSERT INTO " + dbName + ".normal values (1)", driver);
+
+ Path path = new Path(System.getProperty("test.warehouse.dir", ""));
+ path = new Path(path, dbName.toLowerCase() + ".db");
+ path = new Path(path, "normal");
+ FileSystem fs = path.getFileSystem(hconf);
+ fs.delete(path);
+
+ advanceDumpDir();
+ CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName);
+ Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
+
+ run("DROP TABLE " + dbName + ".normal", driver);
+ run("drop database " + dbName, true, driver);
+ }
+
+ @Test
+ public void testDumpWithPartitionDirMissing() throws IOException {
+ String dbName = createDB(testName.getMethodName(), driver);
+ run("CREATE TABLE " + dbName + ".normal(a int) PARTITIONED BY (part int)", driver);
+ run("INSERT INTO " + dbName + ".normal partition (part= 124) values (1)", driver);
+
+ Path path = new Path(System.getProperty("test.warehouse.dir",""));
+ path = new Path(path, dbName.toLowerCase()+".db");
+ path = new Path(path, "normal");
+ path = new Path(path, "part=124");
+ FileSystem fs = path.getFileSystem(hconf);
+ fs.delete(path);
+
+ advanceDumpDir();
+ CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName);
+ Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
+
+ run("DROP TABLE " + dbName + ".normal", driver);
+ run("drop database " + dbName, true, driver);
+ }
+
+ @Test
public void testDumpNonReplDatabase() throws IOException {
String dbName = createDBNonRepl(testName.getMethodName(), driver);
verifyFail("REPL DUMP " + dbName, driver);
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index 4892486..86c0405 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -32,6 +32,10 @@ import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments;
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import org.slf4j.Logger;
@@ -63,10 +67,11 @@ public class TestReplicationScenariosAcidTables {
protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
private static WarehouseInstance primary, replica, replicaNonAcid;
private String primaryDbName, replicatedDbName;
+ private static HiveConf conf;
@BeforeClass
public static void classLevelSetup() throws Exception {
- Configuration conf = new Configuration();
+ conf = new HiveConf(TestReplicationScenariosAcidTables.class);
conf.set("dfs.client.use.datanode.hostname", "true");
conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
MiniDFSCluster miniDFSCluster =
@@ -432,4 +437,49 @@ public class TestReplicationScenariosAcidTables {
.run("select name from t2 order by name")
.verifyResults(Arrays.asList("bob", "carl"));
}
+
+ @Test
+ public void testDumpAcidTableWithPartitionDirMissing() throws Throwable {
+ String dbName = testName.getMethodName();
+ primary.run("CREATE DATABASE " + dbName + " WITH DBPROPERTIES ( '" +
+ SOURCE_OF_REPLICATION + "' = '1,2,3')")
+ .run("CREATE TABLE " + dbName + ".normal (a int) PARTITIONED BY (part int)" +
+ " STORED AS ORC TBLPROPERTIES ('transactional'='true')")
+ .run("INSERT INTO " + dbName + ".normal partition (part= 124) values (1)");
+
+ Path path = new Path(primary.warehouseRoot, dbName.toLowerCase()+".db");
+ path = new Path(path, "normal");
+ path = new Path(path, "part=124");
+ FileSystem fs = path.getFileSystem(conf);
+ fs.delete(path);
+
+ CommandProcessorResponse ret = primary.runCommand("REPL DUMP " + dbName +
+ " with ('hive.repl.dump.include.acid.tables' = 'true')");
+ Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
+
+ primary.run("DROP TABLE " + dbName + ".normal");
+ primary.run("drop database " + dbName);
+ }
+
+ @Test
+ public void testDumpAcidTableWithTableDirMissing() throws Throwable {
+ String dbName = testName.getMethodName();
+ primary.run("CREATE DATABASE " + dbName + " WITH DBPROPERTIES ( '" +
+ SOURCE_OF_REPLICATION + "' = '1,2,3')")
+ .run("CREATE TABLE " + dbName + ".normal (a int) " +
+ " STORED AS ORC TBLPROPERTIES ('transactional'='true')")
+ .run("INSERT INTO " + dbName + ".normal values (1)");
+
+ Path path = new Path(primary.warehouseRoot, dbName.toLowerCase()+".db");
+ path = new Path(path, "normal");
+ FileSystem fs = path.getFileSystem(conf);
+ fs.delete(path);
+
+ CommandProcessorResponse ret = primary.runCommand("REPL DUMP " + dbName +
+ " with ('hive.repl.dump.include.acid.tables' = 'true')");
+ Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
+
+ primary.run("DROP TABLE " + dbName + ".normal");
+ primary.run("drop database " + dbName);
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index d0608cf..08f0130 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -68,6 +68,9 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.junit.Assert;
public class TestReplicationScenariosAcrossInstances {
@Rule
@@ -879,41 +882,6 @@ public class TestReplicationScenariosAcrossInstances {
}
@Test
- public void testIfCkptSetForObjectsByBootstrapReplLoad() throws Throwable {
- WarehouseInstance.Tuple tuple = primary
- .run("use " + primaryDbName)
- .run("create table t1 (id int)")
- .run("insert into table t1 values (10)")
- .run("create table t2 (place string) partitioned by (country string)")
- .run("insert into table t2 partition(country='india') values ('bangalore')")
- .run("insert into table t2 partition(country='uk') values ('london')")
- .run("insert into table t2 partition(country='us') values ('sfo')")
- .dump(primaryDbName, null);
-
- replica.load(replicatedDbName, tuple.dumpLocation)
- .run("use " + replicatedDbName)
- .run("repl status " + replicatedDbName)
- .verifyResult(tuple.lastReplicationId)
- .run("show tables")
- .verifyResults(new String[] { "t1", "t2" })
- .run("select country from t2")
- .verifyResults(Arrays.asList("india", "uk", "us"));
-
- Database db = replica.getDatabase(replicatedDbName);
- verifyIfCkptSet(db.getParameters(), tuple.dumpLocation);
- Table t1 = replica.getTable(replicatedDbName, "t1");
- verifyIfCkptSet(t1.getParameters(), tuple.dumpLocation);
- Table t2 = replica.getTable(replicatedDbName, "t2");
- verifyIfCkptSet(t2.getParameters(), tuple.dumpLocation);
- Partition india = replica.getPartition(replicatedDbName, "t2", Collections.singletonList("india"));
- verifyIfCkptSet(india.getParameters(), tuple.dumpLocation);
- Partition us = replica.getPartition(replicatedDbName, "t2", Collections.singletonList("us"));
- verifyIfCkptSet(us.getParameters(), tuple.dumpLocation);
- Partition uk = replica.getPartition(replicatedDbName, "t2", Collections.singletonList("uk"));
- verifyIfCkptSet(uk.getParameters(), tuple.dumpLocation);
- }
-
- @Test
public void testIncrementalDumpMultiIteration() throws Throwable {
WarehouseInstance.Tuple bootstrapTuple = primary.dump(primaryDbName, null);
@@ -1182,7 +1150,9 @@ public class TestReplicationScenariosAcrossInstances {
assertEquals(0, replica.getForeignKeyList(replicatedDbName, "t2").size());
// Retry with different dump should fail.
- replica.loadFailure(replicatedDbName, tuple2.dumpLocation);
+ CommandProcessorResponse ret = replica.runCommand("REPL LOAD " + replicatedDbName +
+ " FROM '" + tuple2.dumpLocation + "'");
+ Assert.assertEquals(ret.getResponseCode(), ErrorMsg.REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID.getErrorCode());
// Verify if create table is not called on table t1 but called for t2 and t3.
// Also, allow constraint creation only on t1 and t3. Foreign key creation on t2 fails.
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index fc812ad..f666df1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -77,6 +77,7 @@ public class WarehouseInstance implements Closeable {
HiveConf hiveConf;
MiniDFSCluster miniDFSCluster;
private HiveMetaStoreClient client;
+ public final Path warehouseRoot;
private static int uniqueIdentifier = 0;
@@ -90,7 +91,7 @@ public class WarehouseInstance implements Closeable {
assert miniDFSCluster.isDataNodeUp();
DistributedFileSystem fs = miniDFSCluster.getFileSystem();
- Path warehouseRoot = mkDir(fs, "/warehouse" + uniqueIdentifier);
+ warehouseRoot = mkDir(fs, "/warehouse" + uniqueIdentifier);
if (StringUtils.isNotEmpty(keyNameForEncryptedZone)) {
fs.createEncryptionZone(warehouseRoot, keyNameForEncryptedZone);
}
@@ -199,6 +200,10 @@ public class WarehouseInstance implements Closeable {
return this;
}
+ public CommandProcessorResponse runCommand(String command) throws Throwable {
+ return driver.run(command);
+ }
+
WarehouseInstance runFailure(String command) throws Throwable {
CommandProcessorResponse ret = driver.run(command);
if (ret.getException() == null) {
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 90d6b8f..b2c9daa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -502,7 +502,8 @@ public enum ErrorMsg {
//if the error message is changed for REPL_EVENTS_MISSING_IN_METASTORE, then need modification in getNextNotification
//method in HiveMetaStoreClient
REPL_EVENTS_MISSING_IN_METASTORE(20016, "Notification events are missing in the meta store."),
- REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID(20017, "Target database is bootstrapped from some other path."),
+ REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID(20017, "Load path {0} not valid as target database is bootstrapped " +
+ "from some other path : {1}."),
REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH(20018, "File is missing from both source and cm path."),
REPL_LOAD_PATH_NOT_FOUND(20019, "Load path does not exist."),
REPL_DATABASE_IS_NOT_SOURCE_OF_REPLICATION(20020,
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 7e5f805..e48657c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -121,6 +121,10 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
lastReplId = incrementalDump(dumpRoot, dmd, cmRoot);
}
prepareReturnValues(Arrays.asList(dumpRoot.toUri().toString(), String.valueOf(lastReplId)), dumpSchema);
+ } catch (RuntimeException e) {
+ LOG.error("failed", e);
+ setException(e);
+ return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
} catch (Exception e) {
LOG.error("failed", e);
setException(e);
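The new RuntimeException branch works because ErrorMsg.getErrorMsg(String) matches an exception message against the known error templates, falling back to a generic error when nothing matches. Since the dump path (see Utils.getDataPathList below) embeds the canonical FILE_NOT_FOUND text into the message it throws, the error code round-trips, roughly as in this hedged illustration (the path is made up):

    String msg = ErrorMsg.FILE_NOT_FOUND.format("hdfs://nn/warehouse/db.db/normal");  // raised during dump
    int rc = ErrorMsg.getErrorMsg(msg).getErrorCode();  // recovered as the task's return code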
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
index 054153c..0fd305a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
@@ -79,7 +79,7 @@ public class LoadDatabase {
}
return tracker;
} catch (Exception e) {
- throw new SemanticException(e);
+ throw new SemanticException(e.getMessage(), e);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index 618be1d..b1f731f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.repl.util;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
@@ -115,9 +116,8 @@ public class ReplUtils {
if (props.get(REPL_CHECKPOINT_KEY).equals(dumpRoot)) {
return true;
}
- throw new InvalidOperationException("REPL LOAD with Dump: " + dumpRoot
- + " is not allowed as the target DB: " + dbName
- + " is already bootstrap loaded by another Dump " + props.get(REPL_CHECKPOINT_KEY));
+ throw new InvalidOperationException(ErrorMsg.REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID.format(dumpRoot,
+ props.get(REPL_CHECKPOINT_KEY)));
}
return false;
}
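REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID now carries two placeholders ({0} is the offending load path, {1} is the dump the database was actually bootstrapped from), filled in via ErrorMsg.format. A hedged example of the resulting message (paths invented):

    String msg = ErrorMsg.REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID.format("/repl/dump2", "/repl/dump1");
    // "Load path /repl/dump2 not valid as target database is bootstrapped from some other path : /repl/dump1."

Using a numbered ErrorMsg instead of an ad-hoc string is what lets the updated test above assert on getResponseCode() rather than parsing message text.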
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java
index d73fc4f..9e24799 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.parse.repl.dump;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hive.ql.session.SessionState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
@@ -36,6 +38,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Future;
import static org.apache.hadoop.hive.ql.parse.repl.dump.TableExport.Paths;
@@ -70,10 +73,11 @@ class PartitionExport {
this.callersSession = SessionState.get();
}
- void write(final ReplicationSpec forReplicationSpec) throws InterruptedException {
+ void write(final ReplicationSpec forReplicationSpec) throws InterruptedException, HiveException {
+ List<Future<?>> futures = new LinkedList<>();
ExecutorService producer = Executors.newFixedThreadPool(1,
new ThreadFactoryBuilder().setNameFormat("partition-submitter-thread-%d").build());
- producer.submit(() -> {
+ futures.add(producer.submit(() -> {
SessionState.setCurrentSessionState(callersSession);
for (Partition partition : partitionIterable) {
try {
@@ -83,7 +87,7 @@ class PartitionExport {
"Error while queuing up the partitions for export of data files", e);
}
}
- });
+ }));
producer.shutdown();
ThreadFactory namingThreadFactory =
@@ -102,7 +106,7 @@ class PartitionExport {
continue;
}
LOG.debug("scheduling partition dump {}", partition.getName());
- consumer.submit(() -> {
+ futures.add(consumer.submit(() -> {
String partitionName = partition.getName();
String threadName = Thread.currentThread().getName();
LOG.debug("Thread: {}, start partition dump {}", threadName, partitionName);
@@ -115,11 +119,19 @@ class PartitionExport {
.export(forReplicationSpec);
LOG.debug("Thread: {}, finish partition dump {}", threadName, partitionName);
} catch (Exception e) {
- throw new RuntimeException("Error while export of data files", e);
+ throw new RuntimeException(e.getMessage(), e);
}
- });
+ }));
}
consumer.shutdown();
+ for (Future<?> future : futures) {
+ try {
+ future.get();
+ } catch (Exception e) {
+ LOG.error("failed", e.getCause());
+ throw new HiveException(e.getCause().getMessage(), e.getCause());
+ }
+ }
// may be drive this via configuration as well.
consumer.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
}
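The PartitionExport change is the heart of the error-propagation fix for partitioned tables: previously, exceptions thrown inside the producer/consumer tasks died silently inside the executor, because nothing ever called get() on the returned Futures. Collecting every Future and joining them surfaces the first worker failure as a HiveException on the dumping thread. A self-contained sketch of that pattern (names illustrative, not from the patch):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.*;

    public class FuturePropagation {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newFixedThreadPool(2);
            List<Future<?>> futures = new ArrayList<>();
            Runnable worker = () -> {
                throw new RuntimeException("worker failed");  // invisible unless get() is called
            };
            futures.add(pool.submit(worker));
            pool.shutdown();
            for (Future<?> f : futures) {
                try {
                    f.get();  // rethrows the worker's exception wrapped in ExecutionException
                } catch (ExecutionException e) {
                    // PartitionExport does the equivalent, wrapping the cause in a HiveException.
                    throw new IllegalStateException(e.getCause().getMessage(), e.getCause());
                }
            }
        }
    }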
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
index e356607..976104c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -37,6 +38,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
@@ -204,7 +206,11 @@ public class Utils {
static List<Path> getDataPathList(Path fromPath, ReplicationSpec replicationSpec, HiveConf conf)
throws IOException {
if (replicationSpec.isTransactionalTableDump()) {
- return AcidUtils.getValidDataPaths(fromPath, conf, replicationSpec.getValidWriteIdList());
+ try {
+ return AcidUtils.getValidDataPaths(fromPath, conf, replicationSpec.getValidWriteIdList());
+ } catch (FileNotFoundException e) {
+ throw new IOException(ErrorMsg.FILE_NOT_FOUND.format(e.getMessage()), e);
+ }
} else {
return Collections.singletonList(fromPath);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
index 58eae38..e8eaae6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.parse.repl.dump.io;
import java.io.BufferedWriter;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
@@ -46,6 +47,8 @@ import org.apache.hadoop.hive.shims.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hive.ql.ErrorMsg.FILE_NOT_FOUND;
+
//TODO: this object is created once to call one method and then immediately destroyed.
//So it's basically just a roundabout way to pass arguments to a static method. Simplify?
public class FileOperations {
@@ -156,6 +159,10 @@ public class FileOperations {
}
done = true;
} catch (IOException e) {
+ if (e instanceof FileNotFoundException) {
+ logger.error("exporting data files in dir : " + dataPathList + " to " + exportRootDataDir + " failed");
+ throw new FileNotFoundException(FILE_NOT_FOUND.format(e.getMessage()));
+ }
repeat++;
logger.info("writeFilesList failed", e);
if (repeat >= FileUtils.MAX_IO_ERROR_RETRY) {
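Note the asymmetry the FileOperations change introduces: generic IOExceptions during the export are still retried up to FileUtils.MAX_IO_ERROR_RETRY, but a FileNotFoundException fails fast, since retrying cannot bring a deleted source file back. The same policy expressed as separate catch clauses (a hedged restatement; copyStep() is a made-up stand-in for the export body):

    int repeat = 0;
    while (true) {
        try {
            copyStep();              // hypothetical: one attempt at writing the file list
            break;
        } catch (FileNotFoundException e) {
            throw e;                 // permanent failure: surface FILE_NOT_FOUND immediately
        } catch (IOException e) {
            if (++repeat >= FileUtils.MAX_IO_ERROR_RETRY) {
                throw e;             // transient-failure budget exhausted
            }
        }
    }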
http://git-wip-us.apache.org/repos/asf/hive/blob/1c33fea8/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index c2da6d3..93ac74c 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -125,13 +125,10 @@ public class HiveAlterHandler implements AlterHandler {
Table oldt = null;
- List<TransactionalMetaStoreEventListener> transactionalListeners = null;
- List<MetaStoreEventListener> listeners = null;
+ List<TransactionalMetaStoreEventListener> transactionalListeners = handler.getTransactionalListeners();
+ List<MetaStoreEventListener> listeners = handler.getListeners();
Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
- transactionalListeners = handler.getTransactionalListeners();
- listeners = handler.getListeners();
-
try {
boolean rename = false;
List<Partition> parts;