Posted to commits@hive.apache.org by om...@apache.org on 2016/07/14 15:50:57 UTC

hive git commit: HIVE-14004. Fix ArrayIndexOutOfBounds in schema evolution when using ACID. (omalley reviewed by prasanthj)

Repository: hive
Updated Branches:
  refs/heads/branch-2.1 ca647ba50 -> a5a270ec8


HIVE-14004. Fix ArrayIndexOutOfBounds in schema evolution when using ACID. (omalley reviewed
by prasanthj)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a5a270ec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a5a270ec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a5a270ec

Branch: refs/heads/branch-2.1
Commit: a5a270ec8e8bd97c09cb5d8cf351c968201a7d18
Parents: ca647ba
Author: Owen O'Malley <om...@apache.org>
Authored: Wed Jul 13 08:30:30 2016 -0700
Committer: Owen O'Malley <om...@apache.org>
Committed: Thu Jul 14 08:50:28 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |  1 +
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 28 ++++++++++++++++++--
 2 files changed, 27 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a5a270ec/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index 9bcdb39..74c5071 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -79,6 +79,7 @@ public class ReaderImpl extends org.apache.orc.impl.ReaderImpl
     boolean[] include = options.getInclude();
     // if included columns is null, then include all columns
     if (include == null) {
+      options = options.clone();
       include = new boolean[types.size()];
       Arrays.fill(include, true);
       options.include(include);

http://git-wip-us.apache.org/repos/asf/hive/blob/a5a270ec/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index e76c925..245a3bc 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -846,7 +846,7 @@ public class TestTxnCommands2 {
     init.run();
     int numAttemptedCompactions = 1;
     checkCompactionState(new CompactionsByState(numAttemptedCompactions,numFailedCompactions,0,0,0,0,numFailedCompactions + numAttemptedCompactions), countCompacts(txnHandler));
-    
+
     hiveConf.setTimeVar(HiveConf.ConfVars.COMPACTOR_HISTORY_REAPER_INTERVAL, 10, TimeUnit.MILLISECONDS);
     AcidCompactionHistoryService compactionHistoryService = new AcidCompactionHistoryService();
     runHouseKeeperService(compactionHistoryService, hiveConf);//should not remove anything from history
@@ -868,7 +868,7 @@ public class TestTxnCommands2 {
       hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
       hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED),0,0,0,0,
       hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)), countCompacts(txnHandler));
-    
+
     hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false);
     txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
     //at this point "show compactions" should have (COMPACTOR_HISTORY_RETENTION_FAILED) failed + 1 initiated (explicitly by user)
@@ -1139,6 +1139,30 @@ public class TestTxnCommands2 {
     Assert.assertNull(exception);
   }
 
+  @Test
+  public void testCompactWithDelete() throws Exception {
+    int[][] tableData = {{1,2},{3,4}};
+    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData));
+    runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
+    Worker t = new Worker();
+    t.setThreadId((int) t.getId());
+    t.setHiveConf(hiveConf);
+    AtomicBoolean stop = new AtomicBoolean();
+    AtomicBoolean looped = new AtomicBoolean();
+    stop.set(true);
+    t.init(stop, looped);
+    t.run();
+    runStatementOnDriver("delete from " + Table.ACIDTBL + " where b = 4");
+    runStatementOnDriver("update " + Table.ACIDTBL + " set b = -2 where b = 2");
+    runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MINOR'");
+    t.run();
+    TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
+    ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals("Unexpected number of compactions in history", 2, resp.getCompactsSize());
+    Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
+    Assert.assertEquals("Unexpected 1 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(1).getState());
+  }
+
   /**
    * takes raw data and turns it into a string as if from Driver.getResults()
    * sorts rows in dictionary order