Posted to commits@hive.apache.org by ga...@apache.org on 2015/01/27 20:19:04 UTC

svn commit: r1655111 [2/2] - in /hive/branches/branch-1.0: common/src/java/org/apache/hadoop/hive/common/ common/src/test/org/apache/hadoop/hive/common/ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/ itests/hive-unit/src/test/java/org/...

Modified: hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java?rev=1655111&r1=1655110&r2=1655111&view=diff
==============================================================================
--- hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java (original)
+++ hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java Tue Jan 27 19:19:03 2015
@@ -20,7 +20,8 @@ package org.apache.hadoop.hive.ql.io;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.ValidTxnListImpl;
+import org.apache.hadoop.hive.metastore.txn.ValidCompactorTxnList;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat;
 import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFile;
 import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFileSystem;
@@ -91,7 +92,7 @@ public class TestAcidUtils {
         new MockFile("mock:/tbl/part1/subdir/000000_0", 0, new byte[0]));
     AcidUtils.Directory dir =
         AcidUtils.getAcidState(new MockPath(fs, "/tbl/part1"), conf,
-            new ValidTxnListImpl("100:"));
+            new ValidReadTxnList("100:"));
     assertEquals(null, dir.getBaseDirectory());
     assertEquals(0, dir.getCurrentDirectories().size());
     assertEquals(0, dir.getObsolete().size());
@@ -121,7 +122,7 @@ public class TestAcidUtils {
         new MockFile("mock:/tbl/part1/delta_101_101/bucket_0", 0, new byte[0]));
     AcidUtils.Directory dir =
         AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs,
-            "mock:/tbl/part1"), conf, new ValidTxnListImpl("100:"));
+            "mock:/tbl/part1"), conf, new ValidReadTxnList("100:"));
     assertEquals(null, dir.getBaseDirectory());
     List<FileStatus> obsolete = dir.getObsolete();
     assertEquals(2, obsolete.size());
@@ -162,7 +163,7 @@ public class TestAcidUtils {
         new MockFile("mock:/tbl/part1/delta_90_120/bucket_0", 0, new byte[0]));
     AcidUtils.Directory dir =
         AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs,
-            "mock:/tbl/part1"), conf, new ValidTxnListImpl("100:"));
+            "mock:/tbl/part1"), conf, new ValidReadTxnList("100:"));
     assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString());
     List<FileStatus> obsolete = dir.getObsolete();
     assertEquals(5, obsolete.size());
@@ -191,7 +192,7 @@ public class TestAcidUtils {
         new MockFile("mock:/tbl/part1/base_200/bucket_0", 500, new byte[0]));
     Path part = new MockPath(fs, "/tbl/part1");
     AcidUtils.Directory dir =
-        AcidUtils.getAcidState(part, conf, new ValidTxnListImpl("150:"));
+        AcidUtils.getAcidState(part, conf, new ValidReadTxnList("150:"));
     assertEquals("mock:/tbl/part1/base_200", dir.getBaseDirectory().toString());
     List<FileStatus> obsoletes = dir.getObsolete();
     assertEquals(4, obsoletes.size());
@@ -202,7 +203,7 @@ public class TestAcidUtils {
     assertEquals(0, dir.getOriginalFiles().size());
     assertEquals(0, dir.getCurrentDirectories().size());
     // we should always get the latest base
-    dir = AcidUtils.getAcidState(part, conf, new ValidTxnListImpl("10:"));
+    dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("10:"));
     assertEquals("mock:/tbl/part1/base_200", dir.getBaseDirectory().toString());
   }
 
@@ -216,7 +217,7 @@ public class TestAcidUtils {
         new MockFile("mock:/tbl/part1/000001_1", 500, new byte[0]));
     Path part = new MockPath(fs, "/tbl/part1");
     AcidUtils.Directory dir =
-        AcidUtils.getAcidState(part, conf, new ValidTxnListImpl("150:"));
+        AcidUtils.getAcidState(part, conf, new ValidReadTxnList("150:"));
     // The two original buckets won't be in the obsolete list because we don't look at those
     // until we have determined there is no base.
     List<FileStatus> obsolete = dir.getObsolete();
@@ -239,7 +240,7 @@ public class TestAcidUtils {
         new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0]));
     Path part = new MockPath(fs, "mock:/tbl/part1");
     AcidUtils.Directory dir =
-        AcidUtils.getAcidState(part, conf, new ValidTxnListImpl("100:"));
+        AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:"));
     assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString());
     List<FileStatus> obsolete = dir.getObsolete();
     assertEquals(2, obsolete.size());
@@ -252,4 +253,50 @@ public class TestAcidUtils {
     assertEquals("mock:/tbl/part1/delta_000062_62", delts.get(2).getPath().toString());
     assertEquals("mock:/tbl/part1/delta_0000063_63", delts.get(3).getPath().toString());
   }
+
+  @Test
+  public void deltasWithOpenTxnInRead() throws Exception {
+    Configuration conf = new Configuration();
+    MockFileSystem fs = new MockFileSystem(conf,
+        new MockFile("mock:/tbl/part1/delta_1_1/bucket_0", 500, new byte[0]),
+        new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0]));
+    Path part = new MockPath(fs, "mock:/tbl/part1");
+    AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4"));
+    List<AcidUtils.ParsedDelta> delts = dir.getCurrentDirectories();
+    assertEquals(2, delts.size());
+    assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString());
+    assertEquals("mock:/tbl/part1/delta_2_5", delts.get(1).getPath().toString());
+  }
+
+  @Test
+  public void deltasWithOpenTxnsNotInCompact() throws Exception {
+    Configuration conf = new Configuration();
+    MockFileSystem fs = new MockFileSystem(conf,
+        new MockFile("mock:/tbl/part1/delta_1_1/bucket_0", 500, new byte[0]),
+        new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0]));
+    Path part = new MockPath(fs, "mock:/tbl/part1");
+    AcidUtils.Directory dir =
+        AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("100:4"));
+    List<AcidUtils.ParsedDelta> delts = dir.getCurrentDirectories();
+    assertEquals(1, delts.size());
+    assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString());
+  }
+
+  @Test
+  public void deltasWithOpenTxnsNotInCompact2() throws Exception {
+    Configuration conf = new Configuration();
+    MockFileSystem fs = new MockFileSystem(conf,
+        new MockFile("mock:/tbl/part1/delta_1_1/bucket_0", 500, new byte[0]),
+        new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0]),
+        new MockFile("mock:/tbl/part1/delta_2_5/bucket_0" + AcidUtils.DELTA_SIDE_FILE_SUFFIX, 500,
+            new byte[0]),
+        new MockFile("mock:/tbl/part1/delta_6_10/bucket_0", 500, new byte[0]));
+    Path part = new MockPath(fs, "mock:/tbl/part1");
+    AcidUtils.Directory dir =
+        AcidUtils.getAcidState(part, conf, new ValidCompactorTxnList("100:4"));
+    List<AcidUtils.ParsedDelta> delts = dir.getCurrentDirectories();
+    assertEquals(1, delts.size());
+    assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString());
+  }
+
 }
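
A note on the change above: the old ValidTxnListImpl is split into two
implementations with different visibility rules. ValidReadTxnList (in
hive-common) treats every committed transaction at or below the high watermark
as valid for a reader, while ValidCompactorTxnList (in the metastore) also
rejects any transaction above the lowest still-open one, since the compactor
must not merge deltas past an open transaction. The new deltasWithOpenTxnInRead
and deltasWithOpenTxnsNotInCompact tests pin down exactly that split: with
"100:4" (high watermark 100, transaction 4 open) the reader still sees
delta_2_5, but the compactor keeps only delta_1_1. A minimal sketch of the
contrast, assuming the ValidTxnList interface at this revision exposes an
isTxnValid(long) accessor:

    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;
    import org.apache.hadoop.hive.metastore.txn.ValidCompactorTxnList;

    public class TxnListContrast {
      public static void main(String[] args) {
        // "100:4" encodes a high watermark of 100 with transaction 4 still open.
        ValidTxnList reader = new ValidReadTxnList("100:4");
        ValidTxnList compactor = new ValidCompactorTxnList("100:4");

        System.out.println(reader.isTxnValid(5));    // true: committed in the snapshot
        System.out.println(reader.isTxnValid(4));    // false: 4 is open
        System.out.println(compactor.isTxnValid(3)); // true: below the open transaction
        System.out.println(compactor.isTxnValid(5)); // false: above open transaction 4
      }
    }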

Modified: hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java?rev=1655111&r1=1655110&r2=1655111&view=diff
==============================================================================
--- hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java (original)
+++ hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java Tue Jan 27 19:19:03 2015
@@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.common.ValidTxnListImpl;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
@@ -301,7 +301,7 @@ public class TestOrcRawRecordMerger {
   }
 
   private static ValidTxnList createMaximalTxnList() {
-    return new ValidTxnListImpl(Long.MAX_VALUE + ":");
+    return new ValidReadTxnList(Long.MAX_VALUE + ":");
   }
 
   @Test
@@ -492,7 +492,7 @@ public class TestOrcRawRecordMerger {
         .maximumTransactionId(100);
     of.getRecordUpdater(root, options).close(false);
 
-    ValidTxnList txnList = new ValidTxnListImpl("200:");
+    ValidTxnList txnList = new ValidReadTxnList("200:");
     AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList);
 
     Path basePath = AcidUtils.createBucketFile(directory.getBaseDirectory(),
@@ -550,7 +550,7 @@ public class TestOrcRawRecordMerger {
     ru.delete(200, new MyRow("", 8, 0, BUCKET));
     ru.close(false);
 
-    ValidTxnList txnList = new ValidTxnListImpl("200:");
+    ValidTxnList txnList = new ValidReadTxnList("200:");
     AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList);
 
     assertEquals(new Path(root, "base_0000100"), directory.getBaseDirectory());
@@ -734,7 +734,7 @@ public class TestOrcRawRecordMerger {
     merger.close();
 
     // try ignoring the 200 transaction and make sure it still works
-    ValidTxnList txns = new ValidTxnListImpl("2000:200");
+    ValidTxnList txns = new ValidReadTxnList("2000:200");
     merger =
         new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
             txns, new Reader.Options(),
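
The string handed to these constructors is the serialized form of a
ValidTxnList: the high watermark first, then colon-separated exception (open or
aborted) transaction ids. That is why Long.MAX_VALUE + ":" builds a list that
accepts every transaction, and "2000:200" accepts everything up to 2000 except
transaction 200, letting the merger test replay history as if transaction 200
had never committed. A short illustration under the same isTxnValid(long)
assumption as above:

    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;

    public class TxnListEncoding {
      public static void main(String[] args) {
        // High watermark at Long.MAX_VALUE, no exceptions: everything is valid.
        ValidTxnList everything = new ValidReadTxnList(Long.MAX_VALUE + ":");
        // High watermark 2000, transaction 200 excluded (treated as open).
        ValidTxnList skip200 = new ValidReadTxnList("2000:200");

        System.out.println(everything.isTxnValid(200)); // true
        System.out.println(skip200.isTxnValid(200));    // false: the exception
        System.out.println(skip200.isTxnValid(201));    // true: below the watermark
      }
    }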

Modified: hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java?rev=1655111&r1=1655110&r2=1655111&view=diff
==============================================================================
--- hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java (original)
+++ hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java Tue Jan 27 19:19:03 2015
@@ -51,6 +51,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Set;
 import java.util.Stack;
 
 /**
@@ -155,6 +156,11 @@ public abstract class CompactorTest {
     addFile(t, p, minTxn, maxTxn, numRecords, FileType.DELTA, 2, true);
   }
 
+  protected void addLengthFile(Table t, Partition p, long minTxn, long maxTxn, int numRecords)
+    throws Exception {
+    addFile(t, p, minTxn, maxTxn, numRecords, FileType.LENGTH_FILE, 2, true);
+  }
+
   protected void addBaseFile(Table t, Partition p, long maxTxn, int numRecords) throws Exception {
     addFile(t, p, 0, maxTxn, numRecords, FileType.BASE, 2, true);
   }
@@ -184,9 +190,21 @@ public abstract class CompactorTest {
     return paths;
   }
 
-  protected void burnThroughTransactions(int num) throws MetaException, NoSuchTxnException, TxnAbortedException {
+  protected void burnThroughTransactions(int num)
+      throws MetaException, NoSuchTxnException, TxnAbortedException {
+    burnThroughTransactions(num, null, null);
+  }
+
+  protected void burnThroughTransactions(int num, Set<Long> open, Set<Long> aborted)
+      throws MetaException, NoSuchTxnException, TxnAbortedException {
     OpenTxnsResponse rsp = txnHandler.openTxns(new OpenTxnRequest(num, "me", "localhost"));
-    for (long tid : rsp.getTxn_ids()) txnHandler.commitTxn(new CommitTxnRequest(tid));
+    for (long tid : rsp.getTxn_ids()) {
+      if (aborted != null && aborted.contains(tid)) {
+        txnHandler.abortTxn(new AbortTxnRequest(tid));
+      } else if (open == null || !open.contains(tid)) {
+        txnHandler.commitTxn(new CommitTxnRequest(tid));
+      }
+    }
   }
 
   protected void stopThread() {
@@ -248,7 +266,7 @@ public abstract class CompactorTest {
     return location;
   }
 
-  private enum FileType {BASE, DELTA, LEGACY};
+  private enum FileType {BASE, DELTA, LEGACY, LENGTH_FILE};
 
   private void addFile(Table t, Partition p, long minTxn, long maxTxn,
                        int numRecords,  FileType type, int numBuckets,
@@ -258,6 +276,7 @@ public abstract class CompactorTest {
     String filename = null;
     switch (type) {
       case BASE: filename = "base_" + maxTxn; break;
+      case LENGTH_FILE: // Fall through to delta
       case DELTA: filename = "delta_" + minTxn + "_" + maxTxn; break;
       case LEGACY: break; // handled below
     }
@@ -272,12 +291,19 @@ public abstract class CompactorTest {
         Path dir = new Path(location, filename);
         fs.mkdirs(dir);
         partFile = AcidUtils.createBucketFile(dir, bucket);
+        if (type == FileType.LENGTH_FILE) {
+          partFile = new Path(partFile.toString() + AcidUtils.DELTA_SIDE_FILE_SUFFIX);
+        }
       }
       FSDataOutputStream out = fs.create(partFile);
-      for (int i = 0; i < numRecords; i++) {
-        RecordIdentifier ri = new RecordIdentifier(maxTxn - 1, bucket, i);
-        ri.write(out);
-        out.writeBytes("mary had a little lamb its fleece was white as snow\n");
+      if (type == FileType.LENGTH_FILE) {
+        out.writeInt(numRecords);
+      } else {
+        for (int i = 0; i < numRecords; i++) {
+          RecordIdentifier ri = new RecordIdentifier(maxTxn - 1, bucket, i);
+          ri.write(out);
+          out.writeBytes("mary had a little lamb its fleece was white as snow\n");
+        }
       }
       out.close();
     }
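
Two fixtures are added here for the new worker tests: addLengthFile writes a
streaming-style side file next to a delta's bucket file (the bucket file name
plus AcidUtils.DELTA_SIDE_FILE_SUFFIX), imitating a delta whose hcatalog
streaming writer has not closed yet, and burnThroughTransactions can now leave
chosen transaction ids open or abort them rather than committing everything.
A self-contained sketch of the side-file convention the helper mimics; the
/tmp path is illustrative and the int payload mirrors this test helper, not
necessarily the streaming writer's exact on-disk format:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.AcidUtils;

    public class SideFileSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path bucket = new Path("/tmp/tbl/part1/delta_23_25/bucket_0");
        // The side file sits next to the bucket file it describes.
        Path side = new Path(bucket.toString() + AcidUtils.DELTA_SIDE_FILE_SUFFIX);

        FSDataOutputStream out = fs.create(side);
        out.writeInt(3); // record count, written the same way as in addFile above
        out.close();

        FSDataInputStream in = fs.open(side);
        System.out.println("records flushed so far: " + in.readInt());
        in.close();
      }
    }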

Modified: hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
URL: http://svn.apache.org/viewvc/hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java?rev=1655111&r1=1655110&r2=1655111&view=diff
==============================================================================
--- hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java (original)
+++ hive/branches/branch-1.0/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java Tue Jan 27 19:19:03 2015
@@ -17,19 +17,20 @@
  */
 package org.apache.hadoop.hive.ql.txn.compactor;
 
-import junit.framework.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.*;
-import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.*;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 
@@ -274,7 +275,6 @@ public class TestWorker extends Compacto
     // There should now be 4 directories in the location
     FileSystem fs = FileSystem.get(conf);
     FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
-    for (int i = 0; i < stat.length; i++) System.out.println("HERE: " + stat[i].getPath().toString());
     Assert.assertEquals(4, stat.length);
 
     // Find the new delta file and make sure it has the right contents
@@ -296,6 +296,78 @@ public class TestWorker extends Compacto
   }
 
   @Test
+  public void minorWithOpenInMiddle() throws Exception {
+    LOG.debug("Starting minorWithOpenInMiddle");
+    Table t = newTable("default", "mtwb", false);
+
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 25L, 3);
+    addLengthFile(t, null, 23L, 25L, 3);
+    addDeltaFile(t, null, 26L, 27L, 2);
+    burnThroughTransactions(27, new HashSet<Long>(Arrays.asList(23L)), null);
+
+    CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR);
+    txnHandler.compact(rqst);
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(1, compacts.size());
+    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
+
+    // There should now be 5 directories in the location (the cleaner has not run yet)
+    FileSystem fs = FileSystem.get(conf);
+    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
+    Assert.assertEquals(5, stat.length);
+
+    // Find the new delta directory and make sure it is named as expected
+    Arrays.sort(stat);
+    Assert.assertEquals("base_20", stat[0].getPath().getName());
+    Assert.assertEquals("delta_0000021_0000022", stat[1].getPath().getName());
+    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
+    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
+    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+  }
+
+  @Test
+  public void minorWithAborted() throws Exception {
+    LOG.debug("Starting minorWithAborted");
+    Table t = newTable("default", "mtwb", false);
+
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 25L, 3);
+    addLengthFile(t, null, 23L, 25L, 3);
+    addDeltaFile(t, null, 26L, 27L, 2);
+    burnThroughTransactions(27, null, new HashSet<Long>(Arrays.asList(24L, 25L)));
+
+    CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR);
+    txnHandler.compact(rqst);
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(1, compacts.size());
+    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
+
+    // There should now be 5 directories in the location (the cleaner has not run yet)
+    FileSystem fs = FileSystem.get(conf);
+    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
+    Assert.assertEquals(5, stat.length);
+
+    // Find the new delta directory and make sure it is named as expected
+    Arrays.sort(stat);
+    Assert.assertEquals("base_20", stat[0].getPath().getName());
+    Assert.assertEquals("delta_0000021_0000027", stat[1].getPath().getName());
+    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
+    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
+    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+  }
+
+  @Test
   public void minorPartitionWithBase() throws Exception {
     Table t = newTable("default", "mpwb", true);
     Partition p = newPartition(t, "today");
@@ -481,7 +553,7 @@ public class TestWorker extends Compacto
     addDeltaFile(t, null, 1L, 2L, 2);
     addDeltaFile(t, null, 3L, 4L, 2);
 
-    burnThroughTransactions(5);
+    burnThroughTransactions(4);
 
     CompactionRequest rqst = new CompactionRequest("default", "matnb", CompactionType.MAJOR);
     txnHandler.compact(rqst);
@@ -603,6 +675,7 @@ public class TestWorker extends Compacto
 
   @Test
   public void majorPartitionWithBaseMissingBuckets() throws Exception {
+    LOG.debug("Starting majorPartitionWithBaseMissingBuckets");
     Table t = newTable("default", "mapwbmb", true);
     Partition p = newPartition(t, "today");
 
@@ -611,7 +684,7 @@ public class TestWorker extends Compacto
     addDeltaFile(t, p, 21L, 22L, 2, 2, false);
     addDeltaFile(t, p, 23L, 26L, 4);
 
-    burnThroughTransactions(25);
+    burnThroughTransactions(27);
 
     CompactionRequest rqst = new CompactionRequest("default", "mapwbmb", CompactionType.MAJOR);
     rqst.setPartitionname("ds=today");
@@ -654,4 +727,76 @@ public class TestWorker extends Compacto
     }
     Assert.assertTrue(sawNewBase);
   }
+
+  @Test
+  public void majorWithOpenInMiddle() throws Exception {
+    LOG.debug("Starting majorWithOpenInMiddle");
+    Table t = newTable("default", "mtwb", false);
+
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 25L, 3);
+    addLengthFile(t, null, 23L, 25L, 3);
+    addDeltaFile(t, null, 26L, 27L, 2);
+    burnThroughTransactions(27, new HashSet<Long>(Arrays.asList(23L)), null);
+
+    CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR);
+    txnHandler.compact(rqst);
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(1, compacts.size());
+    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
+
+    // There should now be 5 directories in the location (the cleaner has not run yet)
+    FileSystem fs = FileSystem.get(conf);
+    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
+    Assert.assertEquals(5, stat.length);
+
+    // Find the new base directory and make sure it is named as expected
+    Arrays.sort(stat);
+    Assert.assertEquals("base_0000022", stat[0].getPath().getName());
+    Assert.assertEquals("base_20", stat[1].getPath().getName());
+    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
+    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
+    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+  }
+
+  @Test
+  public void majorWithAborted() throws Exception {
+    LOG.debug("Starting majorWithAborted");
+    Table t = newTable("default", "mtwb", false);
+
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 25L, 3);
+    addLengthFile(t, null, 23L, 25L, 3);
+    addDeltaFile(t, null, 26L, 27L, 2);
+    burnThroughTransactions(27, null, new HashSet<Long>(Arrays.asList(24L, 25L)));
+
+    CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR);
+    txnHandler.compact(rqst);
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(1, compacts.size());
+    Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
+
+    // There should now be 5 directories in the location (the cleaner has not run yet)
+    FileSystem fs = FileSystem.get(conf);
+    FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
+    Assert.assertEquals(5, stat.length);
+
+    // Find the new base directory and make sure it is named as expected
+    Arrays.sort(stat);
+    Assert.assertEquals("base_0000027", stat[0].getPath().getName());
+    Assert.assertEquals("base_20", stat[1].getPath().getName());
+    Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
+    Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
+    Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
+  }
 }
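
Taken together, the four new worker tests pin down the compaction boundary
rules: an open transaction in the middle (23) caps both minor and major
compaction at the last cleanly committed id before it, hence
delta_0000021_0000022 and base_0000022, while aborted transactions (24 and 25)
block nothing, hence delta_0000021_0000027 and base_0000027. The assertions
also depend on the directory-name convention: compactor output zero-pads
transaction ids to seven digits, while the hand-made test inputs use the older
unpadded form. A small illustrative helper (hypothetical, not Hive API) for
that naming:

    public class AcidDirNames {
      // Compactor output uses seven-digit zero padding (delta_0000021_0000022);
      // the legacy unpadded form (delta_21_22) denotes the same transaction range.
      static String deltaDirName(long minTxn, long maxTxn) {
        return String.format("delta_%07d_%07d", minTxn, maxTxn);
      }

      static String baseDirName(long maxTxn) {
        return String.format("base_%07d", maxTxn);
      }

      public static void main(String[] args) {
        System.out.println(deltaDirName(21, 22)); // delta_0000021_0000022
        System.out.println(baseDirName(27));      // base_0000027
      }
    }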