Posted to commits@accumulo.apache.org by ec...@apache.org on 2014/09/15 20:46:58 UTC

git commit: ACCUMULO-3125 rename tests written for specific tickets

Repository: accumulo
Updated Branches:
  refs/heads/master 83ed12ae3 -> 0ac596022


ACCUMULO-3125 rename tests written for specific tickets


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/0ac59602
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/0ac59602
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/0ac59602

Branch: refs/heads/master
Commit: 0ac596022cb0397dac5ee37a05558b98abe5ab58
Parents: 83ed12a
Author: Eric C. Newton <er...@gmail.com>
Authored: Mon Sep 15 14:46:40 2014 -0400
Committer: Eric C. Newton <er...@gmail.com>
Committed: Mon Sep 15 14:46:40 2014 -0400

----------------------------------------------------------------------
 .../apache/accumulo/test/Accumulo3010IT.java    | 91 --------------------
 .../apache/accumulo/test/Accumulo3030IT.java    | 83 ------------------
 .../apache/accumulo/test/Accumulo3047IT.java    | 74 ----------------
 .../test/AllowScansToBeInterruptedIT.java       | 83 ++++++++++++++++++
 .../test/BadDeleteMarkersCreatedIT.java         | 74 ++++++++++++++++
 .../test/RecoveryCompactionsAreFlushesIT.java   | 91 ++++++++++++++++++++
 6 files changed, 248 insertions(+), 248 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/0ac59602/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java b/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java
deleted file mode 100644
index 1a0bfa2..0000000
--- a/test/src/test/java/org/apache/accumulo/test/Accumulo3010IT.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.test.functional.ConfigurableMacIT;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class Accumulo3010IT extends ConfigurableMacIT {
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-    // file system supports recovery
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-  }
-  
-  @Test(timeout = 60 * 1000)
-  public void test() throws Exception {
-    // create a table
-    String tableName = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
-    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "3");
-    // create 3 flush files
-    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-    Mutation m = new Mutation("a");
-    m.put("b", "c", new Value("v".getBytes()));
-    for (int i = 0; i < 3; i++) {
-      bw.addMutation(m);
-      bw.flush();
-      c.tableOperations().flush(tableName, null, null, true);
-    }
-    // create an unsaved mutation
-    bw.addMutation(m);
-    bw.close();
-    // kill the tablet server
-    for (ProcessReference p : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
-      cluster.killProcess(ServerType.TABLET_SERVER, p);
-    }
-    // recover
-    cluster.start();
-    // ensure the table is readable
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
-    }
-    // ensure that the recovery was not a merging minor compaction
-    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-    for (Entry<Key, Value> entry : s) {
-      String filename = entry.getKey().getColumnQualifier().toString();
-      String parts[] = filename.split("/");
-      Assert.assertFalse(parts[parts.length-1].startsWith("M"));
-    }
-  }
-  
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/0ac59602/test/src/test/java/org/apache/accumulo/test/Accumulo3030IT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/Accumulo3030IT.java b/test/src/test/java/org/apache/accumulo/test/Accumulo3030IT.java
deleted file mode 100644
index bc56346..0000000
--- a/test/src/test/java/org/apache/accumulo/test/Accumulo3030IT.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacIT;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class Accumulo3030IT extends ConfigurableMacIT {
-  
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-  }
-
-  @Test(timeout = 60 * 1000)
-  public void test() throws Exception {
-    // make a table
-    final String tableName = getUniqueNames(1)[0];
-    final Connector conn = getConnector();
-    conn.tableOperations().create(tableName);
-    // make the world's slowest scanner
-    final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
-    final IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
-    SlowIterator.setSeekSleepTime(cfg, 99999*1000);
-    scanner.addScanIterator(cfg);
-    // create a thread to interrupt the slow scan
-    final Thread scanThread = Thread.currentThread();
-    Thread thread = new Thread() {
-      @Override
-      public void run() {
-        try {
-          // ensure the scan is running: not perfect, the metadata tables could be scanned, too.
-          String tserver = conn.instanceOperations().getTabletServers().iterator().next();
-          while (conn.instanceOperations().getActiveScans(tserver).size() < 1) {
-            UtilWaitThread.sleep(1000);
-          }
-        } catch (Exception e) {
-          e.printStackTrace();
-        }
-        // BAM!
-        scanThread.interrupt();
-      }
-    };
-    thread.start();
-    try {
-      // Use the scanner, expect problems
-      for (@SuppressWarnings("unused") Entry<Key,Value> entry : scanner) {
-      }
-      Assert.fail("Scan should not succeed");
-    } catch (Exception ex) {
-    } finally {
-      thread.join();
-    }
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/0ac59602/test/src/test/java/org/apache/accumulo/test/Accumulo3047IT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/Accumulo3047IT.java b/test/src/test/java/org/apache/accumulo/test/Accumulo3047IT.java
deleted file mode 100644
index 74730b2..0000000
--- a/test/src/test/java/org/apache/accumulo/test/Accumulo3047IT.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacIT;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class Accumulo3047IT extends ConfigurableMacIT {
-  
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setNumTservers(1);
-    cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
-    cfg.setProperty(Property.GC_CYCLE_START, "0s");
-  }
-
-  @Test(timeout= 60 * 1000)
-  public void test() throws Exception {
-    // make a table
-    String tableName = getUniqueNames(1)[0];
-    Connector c = getConnector();
-    c.tableOperations().create(tableName);
-    // add some splits
-    SortedSet<Text> splits = new TreeSet<Text>();
-    for (int i = 0; i < 10; i++) {
-      splits.add(new Text("" + i));
-    }
-    c.tableOperations().addSplits(tableName, splits);
-    // get rid of all the splits
-    c.tableOperations().deleteRows(tableName, null, null);
-    // get rid of the table
-    c.tableOperations().delete(tableName);
-    // let gc run
-    UtilWaitThread.sleep(5 * 1000);
-    // look for delete markers
-    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    scanner.setRange(MetadataSchema.DeletesSection.getRange());
-    for (Entry<Key,Value> entry : scanner) {
-      Assert.fail(entry.getKey().getRow().toString());
-    }
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/0ac59602/test/src/test/java/org/apache/accumulo/test/AllowScansToBeInterruptedIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/AllowScansToBeInterruptedIT.java b/test/src/test/java/org/apache/accumulo/test/AllowScansToBeInterruptedIT.java
new file mode 100644
index 0000000..bc56346
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/AllowScansToBeInterruptedIT.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacIT;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class AllowScansToBeInterruptedIT extends ConfigurableMacIT {
+  
+  @Override
+  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setNumTservers(1);
+  }
+
+  @Test(timeout = 60 * 1000)
+  public void test() throws Exception {
+    // make a table
+    final String tableName = getUniqueNames(1)[0];
+    final Connector conn = getConnector();
+    conn.tableOperations().create(tableName);
+    // make the world's slowest scanner
+    final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+    final IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
+    SlowIterator.setSeekSleepTime(cfg, 99999 * 1000);
+    scanner.addScanIterator(cfg);
+    // create a thread to interrupt the slow scan
+    final Thread scanThread = Thread.currentThread();
+    Thread thread = new Thread() {
+      @Override
+      public void run() {
+        try {
+          // ensure the scan is running: not perfect, the metadata tables could be scanned, too.
+          String tserver = conn.instanceOperations().getTabletServers().iterator().next();
+          while (conn.instanceOperations().getActiveScans(tserver).size() < 1) {
+            UtilWaitThread.sleep(1000);
+          }
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+        // BAM!
+        scanThread.interrupt();
+      }
+    };
+    thread.start();
+    try {
+      // Use the scanner, expect problems
+      for (@SuppressWarnings("unused") Entry<Key,Value> entry : scanner) {
+      }
+      Assert.fail("Scan should not succeed");
+    } catch (Exception ex) {
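+      // expected: the scan is interrupted by the background thread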
+    } finally {
+      thread.join();
+    }
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/0ac59602/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java b/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
new file mode 100644
index 0000000..74730b2
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacIT;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class BadDeleteMarkersCreatedIT extends ConfigurableMacIT {
+  
+  @Override
+  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setNumTservers(1);
+    cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
+    cfg.setProperty(Property.GC_CYCLE_START, "0s");
+  }
+
+  @Test(timeout = 60 * 1000)
+  public void test() throws Exception {
+    // make a table
+    String tableName = getUniqueNames(1)[0];
+    Connector c = getConnector();
+    c.tableOperations().create(tableName);
+    // add some splits
+    SortedSet<Text> splits = new TreeSet<Text>();
+    for (int i = 0; i < 10; i++) {
+      splits.add(new Text("" + i));
+    }
+    c.tableOperations().addSplits(tableName, splits);
+    // get rid of all the splits
+    c.tableOperations().deleteRows(tableName, null, null);
+    // get rid of the table
+    c.tableOperations().delete(tableName);
+    // let gc run
+    UtilWaitThread.sleep(5 * 1000);
+    // look for delete markers
+    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    scanner.setRange(MetadataSchema.DeletesSection.getRange());
+    for (Entry<Key,Value> entry : scanner) {
+      Assert.fail(entry.getKey().getRow().toString());
+    }
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/0ac59602/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java b/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
new file mode 100644
index 0000000..1a0bfa2
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.test.functional.ConfigurableMacIT;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class RecoveryCompactionsAreFlushesIT extends ConfigurableMacIT {
+
+  @Override
+  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setNumTservers(1);
+    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+    // file system supports recovery
+    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+  }
+  
+  @Test(timeout = 60 * 1000)
+  public void test() throws Exception {
+    // create a table
+    String tableName = getUniqueNames(1)[0];
+    Connector c = getConnector();
+    c.tableOperations().create(tableName);
+    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
+    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "3");
+    // create 3 flush files
+    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+    Mutation m = new Mutation("a");
+    m.put("b", "c", new Value("v".getBytes()));
+    for (int i = 0; i < 3; i++) {
+      bw.addMutation(m);
+      bw.flush();
+      c.tableOperations().flush(tableName, null, null, true);
+    }
+    // create an unsaved mutation
+    bw.addMutation(m);
+    bw.close();
+    // kill the tablet server
+    for (ProcessReference p : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+      cluster.killProcess(ServerType.TABLET_SERVER, p);
+    }
+    // recover
+    cluster.start();
+    // ensure the table is readable
+    for (@SuppressWarnings("unused") Entry<Key,Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
+    }
+    // ensure that the recovery was not a merging minor compaction
+    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+    for (Entry<Key, Value> entry : s) {
+      String filename = entry.getKey().getColumnQualifier().toString();
+      String[] parts = filename.split("/");
+      Assert.assertFalse(parts[parts.length - 1].startsWith("M"));
+    }
+  }
+  
+  
+}