Posted to commits@accumulo.apache.org by kt...@apache.org on 2018/08/07 17:23:42 UTC

[accumulo] branch master updated: Remove partially functional archive feature (#581)

This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
     new fd39a79  Remove partially functional archive feature (#581)
fd39a79 is described below

commit fd39a79bcb7a16cded100fed9ea11a4dea9f986d
Author: Keith Turner <ke...@deenlo.com>
AuthorDate: Tue Aug 7 13:23:40 2018 -0400

    Remove partially functional archive feature (#581)
---
 .../org/apache/accumulo/core/conf/Property.java    |   2 -
 .../apache/accumulo/server/ServerConstants.java    |   6 -
 .../apache/accumulo/gc/SimpleGarbageCollector.java |  82 +-----
 .../accumulo/gc/SimpleGarbageCollectorTest.java    |   7 +-
 .../apache/accumulo/master/tableOps/CleanUp.java   |  51 +---
 .../accumulo/test/BulkImportMonitoringIT.java      |   1 -
 .../org/apache/accumulo/test/FileArchiveIT.java    | 289 ---------------------
 .../apache/accumulo/test/GetFileInfoBulkIT.java    |   1 -
 .../test/functional/ConfigurableMacBase.java       |   1 -
 .../test/performance/RollWALPerformanceIT.java     |   1 -
 10 files changed, 12 insertions(+), 429 deletions(-)
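The net effect in SimpleGarbageCollector is that the archiveOrMoveToTrash/archiveFile
pair collapses into a single trash-or-delete helper. Reconstructed from the hunks
below (surrounding javadoc elided), the resulting method is:

    boolean moveToTrash(Path path) throws IOException {
      // Respect gc.trash.ignore: when the trash is disabled, report failure
      // so the caller falls through to a direct delete.
      if (!isUsingTrash())
        return false;
      try {
        return fs.moveToTrash(path);
      } catch (FileNotFoundException ex) {
        // Already gone; nothing to trash.
        return false;
      }
    }

Call sites then fall back to deletion, e.g.
if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) { ... }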

diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index dbdc3c8..a1754c4 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -582,8 +582,6 @@ public enum Property {
       "The number of threads used to delete RFiles and write-ahead logs"),
   GC_TRASH_IGNORE("gc.trash.ignore", "false", PropertyType.BOOLEAN,
       "Do not use the Trash, even if it is configured."),
-  GC_FILE_ARCHIVE("gc.file.archive", "false", PropertyType.BOOLEAN,
-      "Archive any files/directories instead of moving to the HDFS trash or deleting."),
   GC_TRACE_PERCENT("gc.trace.percent", "0.01", PropertyType.FRACTION,
       "Percent of gc cycles to trace"),
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
index 2992af6..51a54f9 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@ -147,8 +147,6 @@ public class ServerConstants {
   public static final String TABLE_DIR = "tables";
   public static final String RECOVERY_DIR = "recovery";
   public static final String WAL_DIR = "wal";
-  public static final String WALOG_ARCHIVE_DIR = "walogArchive";
-  public static final String FILE_ARCHIVE_DIR = "fileArchive";
 
   public static String[] getTablesDirs() {
     return VolumeConfiguration.prefix(getBaseUris(), TABLE_DIR);
@@ -162,10 +160,6 @@ public class ServerConstants {
     return VolumeConfiguration.prefix(getBaseUris(), WAL_DIR);
   }
 
-  public static String[] getWalogArchives() {
-    return VolumeConfiguration.prefix(getBaseUris(), WALOG_ARCHIVE_DIR);
-  }
-
   public static Path getInstanceIdLocation(Volume v) {
     // all base dirs should have the same instance id, so can choose any one
     return v.prefixChild(INSTANCE_ID_DIR);
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 18da383..aa059e8 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -76,7 +76,6 @@ import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ServerServices.Service;
-import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
@@ -224,15 +223,6 @@ public class SimpleGarbageCollector implements Iface {
     return getConfiguration().getCount(Property.GC_DELETE_THREADS);
   }
 
-  /**
-   * Should files be archived (as opposed to preserved in trash)
-   *
-   * @return True if files should be archived, false otherwise
-   */
-  boolean shouldArchiveFiles() {
-    return getConfiguration().getBoolean(Property.GC_FILE_ARCHIVE);
-  }
-
   private class GCEnv implements GarbageCollectionEnvironment {
 
     private String tableName;
@@ -391,7 +381,7 @@ public class SimpleGarbageCollector implements Iface {
 
               log.debug("Deleting {}", fullPath);
 
-              if (archiveOrMoveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
+              if (moveToTrash(fullPath) || fs.deleteRecursively(fullPath)) {
                 // delete succeeded, still want to delete
                 removeFlag = true;
                 synchronized (SimpleGarbageCollector.this) {
@@ -474,7 +464,7 @@ public class SimpleGarbageCollector implements Iface {
         if (tabletDirs.length == 0) {
           Path p = new Path(dir + "/" + tableID);
           log.debug("Removing table dir {}", p);
-          if (!archiveOrMoveToTrash(p))
+          if (!moveToTrash(p))
             fs.delete(p);
         }
       }
@@ -627,70 +617,14 @@ public class SimpleGarbageCollector implements Iface {
    * @throws IOException
    *           if the volume manager encountered a problem
    */
-  boolean archiveOrMoveToTrash(Path path) throws IOException {
-    if (shouldArchiveFiles()) {
-      return archiveFile(path);
-    } else {
-      if (!isUsingTrash())
-        return false;
-      try {
-        return fs.moveToTrash(path);
-      } catch (FileNotFoundException ex) {
-        return false;
-      }
-    }
-  }
-
-  /**
-   * Move a file, that would otherwise be deleted, to the archive directory for files
-   *
-   * @param fileToArchive
-   *          Path to file that is to be archived
-   * @return True if the file was successfully moved to the file archive directory, false otherwise
-   */
-  boolean archiveFile(Path fileToArchive) throws IOException {
-    // Figure out what the base path this volume uses on this FileSystem
-    Volume sourceVolume = fs.getVolumeByPath(fileToArchive);
-    String sourceVolumeBasePath = sourceVolume.getBasePath();
-
-    log.debug("Base path for volume: {}", sourceVolumeBasePath);
-
-    // Get the path for the file we want to archive
-    String sourcePathBasePath = fileToArchive.toUri().getPath();
-
-    // Strip off the common base path for the file to archive
-    String relativeVolumePath = sourcePathBasePath.substring(sourceVolumeBasePath.length());
-    if (Path.SEPARATOR_CHAR == relativeVolumePath.charAt(0)) {
-      if (relativeVolumePath.length() > 1) {
-        relativeVolumePath = relativeVolumePath.substring(1);
-      } else {
-        relativeVolumePath = "";
-      }
-    }
-
-    log.debug("Computed relative path for file to archive: {}", relativeVolumePath);
-
-    // The file archive path on this volume (we can't archive this file to a different volume)
-    Path archivePath = new Path(sourceVolumeBasePath, ServerConstants.FILE_ARCHIVE_DIR);
-
-    log.debug("File archive path: {}", archivePath);
-
-    fs.mkdirs(archivePath);
-
-    // Preserve the path beneath the Volume's base directory (e.g. tables/1/A_0000001.rf)
-    Path fileArchivePath = new Path(archivePath, relativeVolumePath);
-
-    log.debug("Create full path of {} from {} and {}", fileArchivePath, archivePath,
-        relativeVolumePath);
-
-    // Make sure that it doesn't already exist, something is wrong.
-    if (fs.exists(fileArchivePath)) {
-      log.warn("Tried to archive file, but it already exists: {}", fileArchivePath);
+  boolean moveToTrash(Path path) throws IOException {
+    if (!isUsingTrash())
+      return false;
+    try {
+      return fs.moveToTrash(path);
+    } catch (FileNotFoundException ex) {
       return false;
     }
-
-    log.debug("Moving {} to {}", fileToArchive, fileArchivePath);
-    return fs.rename(fileToArchive, fileArchivePath);
   }
 
   private void getZooLock(HostAndPort addr) throws KeeperException, InterruptedException {
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
index 5d82004..15c8c9d 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
@@ -89,7 +89,6 @@ public class SimpleGarbageCollectorTest {
     conf.put(Property.GC_CYCLE_DELAY.getKey(), "20");
     conf.put(Property.GC_DELETE_THREADS.getKey(), "2");
     conf.put(Property.GC_TRASH_IGNORE.getKey(), "false");
-    conf.put(Property.GC_FILE_ARCHIVE.getKey(), "false");
 
     return new ConfigurationCopy(conf);
   }
@@ -108,7 +107,7 @@ public class SimpleGarbageCollectorTest {
     Path path = createMock(Path.class);
     expect(volMgr.moveToTrash(path)).andReturn(true);
     replay(volMgr);
-    assertTrue(gc.archiveOrMoveToTrash(path));
+    assertTrue(gc.moveToTrash(path));
     verify(volMgr);
   }
 
@@ -117,7 +116,7 @@ public class SimpleGarbageCollectorTest {
     Path path = createMock(Path.class);
     expect(volMgr.moveToTrash(path)).andThrow(new FileNotFoundException());
     replay(volMgr);
-    assertFalse(gc.archiveOrMoveToTrash(path));
+    assertFalse(gc.moveToTrash(path));
     verify(volMgr);
   }
 
@@ -125,7 +124,7 @@ public class SimpleGarbageCollectorTest {
   public void testMoveToTrash_NotUsingTrash() throws Exception {
     systemConfig.set(Property.GC_TRASH_IGNORE.getKey(), "true");
     Path path = createMock(Path.class);
-    assertFalse(gc.archiveOrMoveToTrash(path));
+    assertFalse(gc.moveToTrash(path));
   }
 
   @Test
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
index 5e13c15..81cf7b4 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
@@ -29,8 +29,6 @@ import org.apache.accumulo.core.client.impl.Namespace;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
@@ -40,7 +38,6 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
@@ -171,18 +168,11 @@ class CleanUp extends MasterRepo {
     }
 
     if (refCount == 0) {
-      final AccumuloConfiguration conf = master.getConfiguration();
-      boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
-
       // delete the map files
       try {
         VolumeManager fs = master.getFileSystem();
         for (String dir : ServerConstants.getTablesDirs()) {
-          if (archiveFiles) {
-            archiveFile(fs, dir, tableId);
-          } else {
-            fs.deleteRecursively(new Path(dir, tableId.canonicalID()));
-          }
+          fs.deleteRecursively(new Path(dir, tableId.canonicalID()));
         }
       } catch (IOException e) {
         log.error("Unable to remove deleted table directory", e);
@@ -220,45 +210,6 @@ class CleanUp extends MasterRepo {
     return null;
   }
 
-  protected void archiveFile(VolumeManager fs, String dir, Table.ID tableId) throws IOException {
-    Path tableDirectory = new Path(dir, tableId.canonicalID());
-    Volume v = fs.getVolumeByPath(tableDirectory);
-    String basePath = v.getBasePath();
-
-    // Path component of URI
-    String tableDirPath = tableDirectory.toUri().getPath();
-
-    // Just the suffix of the path (after the Volume's base path)
-    String tableDirSuffix = tableDirPath.substring(basePath.length());
-
-    // Remove a leading path separator char because Path will treat the "child" as an absolute path
-    // with it
-    if (Path.SEPARATOR_CHAR == tableDirSuffix.charAt(0)) {
-      if (tableDirSuffix.length() > 1) {
-        tableDirSuffix = tableDirSuffix.substring(1);
-      } else {
-        tableDirSuffix = "";
-      }
-    }
-
-    // Get the file archive directory on this volume
-    final Path fileArchiveDir = new Path(basePath, ServerConstants.FILE_ARCHIVE_DIR);
-
-    // Make sure it exists just to be safe
-    fs.mkdirs(fileArchiveDir);
-
-    // The destination to archive this table to
-    final Path destTableDir = new Path(fileArchiveDir, tableDirSuffix);
-
-    log.debug("Archiving " + tableDirectory + " to " + tableDirectory);
-
-    if (fs.exists(destTableDir)) {
-      merge(fs, tableDirectory, destTableDir);
-    } else {
-      fs.rename(tableDirectory, destTableDir);
-    }
-  }
-
   protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
     for (FileStatus child : fs.listStatus(src)) {
       final String childName = child.getPath().getName();
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java b/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
index 7b78bc9..4a59bf4 100644
--- a/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
@@ -54,7 +54,6 @@ public class BulkImportMonitoringIT extends ConfigurableMacBase {
   protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
     cfg.setNumTservers(1);
     cfg.useMiniDFS(true);
-    cfg.setProperty(Property.GC_FILE_ARCHIVE, "false");
   }
 
   @Test
diff --git a/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java b/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
deleted file mode 100644
index 5935d5f..0000000
--- a/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.Table;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-/**
- * Tests that files are archived instead of deleted when configured.
- */
-public class FileArchiveIT extends ConfigurableMacBase {
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 2 * 60;
-  }
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
-    cfg.setProperty(Property.GC_FILE_ARCHIVE, "true");
-    cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
-    cfg.setProperty(Property.GC_CYCLE_START, "1s");
-  }
-
-  @Test
-  public void testUnusuedFilesAreArchived() throws Exception {
-    final Connector conn = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-
-    conn.tableOperations().create(tableName);
-
-    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
-    Assert.assertNotNull("Could not get table ID", tableId);
-
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-    Mutation m = new Mutation("row");
-    m.put("", "", "value");
-    bw.addMutation(m);
-    bw.close();
-
-    // Compact memory to disk
-    conn.tableOperations().compact(tableName, null, null, true, true);
-
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
-      s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
-      s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
-      Entry<Key,Value> entry = Iterables.getOnlyElement(s);
-      final String file = entry.getKey().getColumnQualifier().toString();
-      final Path p = new Path(file);
-
-      // Then force another to make an unreferenced file
-      conn.tableOperations().compact(tableName, null, null, true, true);
-
-      log.info("File for table: {}", file);
-
-      FileSystem fs = getCluster().getFileSystem();
-      int i = 0;
-      while (fs.exists(p)) {
-        i++;
-        Thread.sleep(1000);
-        if (0 == i % 10) {
-          log.info("Waited {} iterations, file still exists", i);
-        }
-      }
-
-      log.info("File was removed");
-
-      String filePath = p.toUri().getPath()
-          .substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
-      log.info("File relative to accumulo dir: {}", filePath);
-
-      Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(),
-          ServerConstants.FILE_ARCHIVE_DIR);
-
-      Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
-      // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
-      Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
-
-      Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile,
-          fs.exists(archivedFile));
-    }
-  }
-
-  @Test
-  public void testDeletedTableIsArchived() throws Exception {
-    final Connector conn = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-
-    conn.tableOperations().create(tableName);
-
-    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
-    Assert.assertNotNull("Could not get table ID", tableId);
-
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-    Mutation m = new Mutation("row");
-    m.put("", "", "value");
-    bw.addMutation(m);
-    bw.close();
-
-    // Compact memory to disk
-    conn.tableOperations().compact(tableName, null, null, true, true);
-
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
-      s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
-      s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
-      Entry<Key,Value> entry = Iterables.getOnlyElement(s);
-      final String file = entry.getKey().getColumnQualifier().toString();
-      final Path p = new Path(file);
-
-      conn.tableOperations().delete(tableName);
-
-      log.info("File for table: {}", file);
-
-      FileSystem fs = getCluster().getFileSystem();
-      int i = 0;
-      while (fs.exists(p)) {
-        i++;
-        Thread.sleep(1000);
-        if (0 == i % 10) {
-          log.info("Waited {} iterations, file still exists", i);
-        }
-      }
-
-      log.info("File was removed");
-
-      String filePath = p.toUri().getPath()
-          .substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
-      log.info("File relative to accumulo dir: {}", filePath);
-
-      Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(),
-          ServerConstants.FILE_ARCHIVE_DIR);
-
-      Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
-      // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
-      Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
-
-      Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile,
-          fs.exists(archivedFile));
-    }
-  }
-
-  @Test
-  public void testUnusuedFilesAndDeletedTable() throws Exception {
-    final Connector conn = getConnector();
-    final String tableName = getUniqueNames(1)[0];
-
-    conn.tableOperations().create(tableName);
-
-    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
-    Assert.assertNotNull("Could not get table ID", tableId);
-
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-    Mutation m = new Mutation("row");
-    m.put("", "", "value");
-    bw.addMutation(m);
-    bw.close();
-
-    // Compact memory to disk
-    conn.tableOperations().compact(tableName, null, null, true, true);
-
-    Entry<Key,Value> entry;
-    Path fileArchiveDir;
-    FileSystem fs;
-    int i = 0;
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
-      s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
-      s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
-      entry = Iterables.getOnlyElement(s);
-      final String file = entry.getKey().getColumnQualifier().toString();
-      final Path p = new Path(file);
-
-      // Then force another to make an unreferenced file
-      conn.tableOperations().compact(tableName, null, null, true, true);
-
-      log.info("File for table: {}", file);
-
-      fs = getCluster().getFileSystem();
-      while (fs.exists(p)) {
-        i++;
-        Thread.sleep(1000);
-        if (0 == i % 10) {
-          log.info("Waited {} iterations, file still exists", i);
-        }
-      }
-
-      log.info("File was removed");
-
-      String filePath = p.toUri().getPath()
-          .substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
-      log.info("File relative to accumulo dir: {}", filePath);
-
-      fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(),
-          ServerConstants.FILE_ARCHIVE_DIR);
-
-      Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
-      // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
-      Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
-
-      Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile,
-          fs.exists(archivedFile));
-
-      // Offline the table so we can be sure there is a single file
-      conn.tableOperations().offline(tableName, true);
-    }
-
-    // See that the file in metadata currently is
-    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
-      s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
-      s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
-      entry = Iterables.getOnlyElement(s);
-      final String finalFile = entry.getKey().getColumnQualifier().toString();
-      final Path finalPath = new Path(finalFile);
-
-      conn.tableOperations().delete(tableName);
-
-      log.info("File for table: {}", finalPath);
-
-      i = 0;
-      while (fs.exists(finalPath)) {
-        i++;
-        Thread.sleep(1000);
-        if (0 == i % 10) {
-          log.info("Waited {} iterations, file still exists", i);
-        }
-      }
-
-      log.info("File was removed");
-
-      String finalFilePath = finalPath.toUri().getPath()
-          .substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
-      log.info("File relative to accumulo dir: {}", finalFilePath);
-
-      Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
-      // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
-      Path finalArchivedFile = new Path(fileArchiveDir, finalFilePath.substring(1));
-
-      Assert.assertTrue("File doesn't exists in archive directory: " + finalArchivedFile,
-          fs.exists(finalArchivedFile));
-    }
-  }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java b/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java
index 0be588f..1363646 100644
--- a/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java
@@ -61,7 +61,6 @@ public class GetFileInfoBulkIT extends ConfigurableMacBase {
   protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
     cfg.setNumTservers(1);
     cfg.useMiniDFS(true);
-    cfg.setProperty(Property.GC_FILE_ARCHIVE, "false");
   }
 
   @SuppressWarnings("unchecked")
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
index 0fdb0c2..504eade 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
@@ -154,7 +154,6 @@ public class ConfigurableMacBase extends AccumuloITBase {
     String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath();
     String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString();
     cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce);
-    cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString());
     Configuration coreSite = new Configuration(false);
     cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
     configure(cfg, coreSite);
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java b/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
index 3a5cfa2..2aada43 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
@@ -51,7 +51,6 @@ public class RollWALPerformanceIT extends ConfigurableMacBase {
     cfg.setProperty(Property.TSERV_WAL_REPLICATION, "1");
     cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
     cfg.setProperty(Property.TABLE_MINC_LOGS_MAX, "100");
-    cfg.setProperty(Property.GC_FILE_ARCHIVE, "false");
     cfg.setProperty(Property.GC_CYCLE_START, "1s");
     cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
     cfg.useMiniDFS(true);
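
Note that this commit only removes the code paths; it adds no migration, so any
walogArchive or fileArchive directories already written under a volume's base path
are left in place. A minimal sketch of checking for leftovers with the Hadoop
FileSystem API follows (the ArchiveDirCheck class and the /accumulo default base
path are assumptions for illustration, not part of this commit; the directory
names match the constants removed from ServerConstants):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveDirCheck {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Assumed volume base path; pass your own as the first argument.
        String base = args.length > 0 ? args[0] : "/accumulo";
        // These names mirror the removed WALOG_ARCHIVE_DIR and FILE_ARCHIVE_DIR.
        for (String dir : new String[] {"walogArchive", "fileArchive"}) {
          Path p = new Path(base, dir);
          if (fs.exists(p)) {
            System.out.println("Leftover archive dir: " + p);
          }
        }
      }
    }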