Posted to commits@accumulo.apache.org by ct...@apache.org on 2015/04/29 03:03:38 UTC

[1/9] accumulo git commit: ACCUMULO-3759 Fix Java 8 compiler warnings

Repository: accumulo
Updated Branches:
  refs/heads/1.7 f99638795 -> 6e2e6780f
  refs/heads/master 26d66914a -> a7de08b7c


http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java
new file mode 100644
index 0000000..ef2becd
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+import java.util.Map;
+
+class NamespaceInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  String namespaceName;
+  String namespaceId;
+  String user;
+
+  public Map<String,String> props;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
new file mode 100644
index 0000000..da13ecc
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.io.Text;
+
+class PopulateMetadata extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  PopulateMetadata(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
+    MetadataTableUtil.addTablet(extent, tableInfo.dir, environment, tableInfo.timeType, environment.getMasterLock());
+
+    return new FinishCreateTable(tableInfo);
+
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
new file mode 100644
index 0000000..72832ba
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.util.FastFormat;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+
+import com.google.common.base.Optional;
+
+class PopulateMetadataTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  PopulateMetadataTable(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  static Map<String,String> readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo) throws Exception {
+    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(tableInfo.importDir, "mappings.txt")), UTF_8));
+
+    try {
+      Map<String,String> map = new HashMap<String,String>();
+
+      String line = null;
+      while ((line = in.readLine()) != null) {
+        String[] sa = line.split(":", 2);
+        map.put(sa[0], sa[1]);
+      }
+
+      return map;
+    } finally {
+      in.close();
+    }
+
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
+
+    BatchWriter mbw = null;
+    ZipInputStream zis = null;
+
+    try {
+      VolumeManager fs = master.getFileSystem();
+
+      mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+
+      zis = new ZipInputStream(fs.open(path));
+
+      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
+
+      log.info("importDir is " + tableInfo.importDir);
+
+      // This is a directory already prefixed with proper volume information e.g. hdfs://localhost:8020/path/to/accumulo/tables/...
+      final String bulkDir = tableInfo.importDir;
+
+      final String[] tableDirs = ServerConstants.getTablesDirs();
+
+      ZipEntry zipEntry;
+      while ((zipEntry = zis.getNextEntry()) != null) {
+        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
+          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
+
+          Key key = new Key();
+          Value val = new Value();
+
+          Mutation m = null;
+          Text currentRow = null;
+          int dirCount = 0;
+
+          while (true) {
+            key.readFields(in);
+            val.readFields(in);
+
+            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
+            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
+
+            Text cq;
+
+            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+              String oldName = new Path(key.getColumnQualifier().toString()).getName();
+              String newName = fileNameMappings.get(oldName);
+
+              if (newName == null) {
+                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+                    "File " + oldName + " does not exist in import dir");
+              }
+
+              cq = new Text(bulkDir + "/" + newName);
+            } else {
+              cq = key.getColumnQualifier();
+            }
+
+            if (m == null) {
+              // Make a unique directory inside the table's dir. Multiple tables cannot be imported into one table, so we don't need the unique name allocator
+              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
+
+              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
+              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
+
+              m = new Mutation(metadataRow);
+              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
+              currentRow = metadataRow;
+            }
+
+            if (!currentRow.equals(metadataRow)) {
+              mbw.addMutation(m);
+
+              // Make a unique directory inside the table's dir. Multiple tables cannot be imported into one table, so we don't need the unique name allocator
+              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
+
+              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
+              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
+
+              m = new Mutation(metadataRow);
+              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
+              currentRow = metadataRow;
+            }
+
+            m.put(key.getColumnFamily(), cq, val);
+
+            if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
+              mbw.addMutation(m);
+              break; // it's the last column in the last row
+            }
+          }
+
+          break;
+        }
+      }
+
+      return new MoveExportedFiles(tableInfo);
+    } catch (IOException ioe) {
+      log.warn("{}", ioe.getMessage(), ioe);
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error reading " + path + " " + ioe.getMessage());
+    } finally {
+      if (zis != null) {
+        try {
+          zis.close();
+        } catch (IOException ioe) {
+          log.warn("Failed to close zip file ", ioe);
+        }
+      }
+
+      if (mbw != null) {
+        mbw.close();
+      }
+    }
+  }
+
+  /**
+   * Given options for tables (across multiple volumes), construct an absolute path using the unique name within the chosen volume
+   *
+   * @return An absolute, unique path for the imported table
+   */
+  protected String getClonedTabletDir(Master master, String[] tableDirs, String tabletDir) {
+    // We can try to spread out the tablet dirs across all volumes
+    String tableDir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), tableDirs);
+
+    // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
+    return tableDir + "/" + tableInfo.tableId + "/" + tabletDir;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
+  }
+}
\ No newline at end of file
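
Two details in the new file above are easy to miss: readMappingFile expects one oldName:newName pair per line (split on the first ':'), and the per-tablet directory names allocated in call() come from FastFormat. A minimal sketch of the names that produces, assuming Constants.CLONE_PREFIX_BYTES is the UTF-8 bytes of "c-" (the c-XXXXXXX form the comments mention):

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.util.FastFormat;

public class TabletDirNameDemo {
  public static void main(String[] args) {
    byte[] prefix = "c-".getBytes(UTF_8); // assumed value of Constants.CLONE_PREFIX_BYTES
    for (int dirCount = 0; dirCount < 3; dirCount++) {
      // zero-padded hex, width 8: prints c-00000000, c-00000001, c-00000002
      System.out.println(new String(FastFormat.toZeroPaddedString(dirCount, 8, 16, prefix), UTF_8));
    }
  }
}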

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
new file mode 100644
index 0000000..8ec8834
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.TablePropUtil;
+
+class PopulateZookeeper extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  PopulateZookeeper(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.CREATE);
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    // reserve the table name in zookeeper or fail
+
+    Utils.tableNameLock.lock();
+    try {
+      // write tableName & tableId to zookeeper
+      Instance instance = master.getInstance();
+
+      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
+
+      TableManager.getInstance().addTable(tableInfo.tableId, tableInfo.namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
+
+      for (Entry<String,String> entry : tableInfo.props.entrySet())
+        TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue());
+
+      Tables.clearCache(instance);
+      return new ChooseDir(tableInfo);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+    Instance instance = master.getInstance();
+    TableManager.getInstance().removeTable(tableInfo.tableId);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    Tables.clearCache(instance);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
new file mode 100644
index 0000000..bf101ae
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.NamespacePropUtil;
+
+class PopulateZookeeperWithNamespace extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private NamespaceInfo namespaceInfo;
+
+  PopulateZookeeperWithNamespace(NamespaceInfo ti) {
+    this.namespaceInfo = ti;
+  }
+
+  @Override
+  public long isReady(long id, Master environment) throws Exception {
+    return Utils.reserveNamespace(namespaceInfo.namespaceId, id, true, false, TableOperation.CREATE);
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    Utils.tableNameLock.lock();
+    try {
+      Instance instance = master.getInstance();
+
+      Utils.checkNamespaceDoesNotExist(instance, namespaceInfo.namespaceName, namespaceInfo.namespaceId, TableOperation.CREATE);
+
+      TableManager.prepareNewNamespaceState(instance.getInstanceID(), namespaceInfo.namespaceId, namespaceInfo.namespaceName, NodeExistsPolicy.OVERWRITE);
+
+      for (Entry<String,String> entry : namespaceInfo.props.entrySet())
+        NamespacePropUtil.setNamespaceProperty(namespaceInfo.namespaceId, entry.getKey(), entry.getValue());
+
+      Tables.clearCache(instance);
+
+      return new FinishCreateNamespace(namespaceInfo);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+    TableManager.getInstance().removeNamespace(namespaceInfo.namespaceId);
+    Tables.clearCache(master.getInstance());
+    Utils.unreserveNamespace(namespaceInfo.namespaceId, tid, true);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
new file mode 100644
index 0000000..ace3935
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.security.NamespacePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.security.SecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class SetupNamespacePermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private NamespaceInfo namespaceInfo;
+
+  public SetupNamespacePermissions(NamespaceInfo ti) {
+    this.namespaceInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // give all namespace permissions to the creator
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    for (NamespacePermission permission : NamespacePermission.values()) {
+      try {
+        security.grantNamespacePermission(env.rpcCreds(), namespaceInfo.user, namespaceInfo.namespaceId, permission);
+      } catch (ThriftSecurityException e) {
+        LoggerFactory.getLogger(FinishCreateNamespace.class).error("{}", e.getMessage(), e);
+        throw e;
+      }
+    }
+
+    // setup permissions in zookeeper before table info in zookeeper
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    return new PopulateZookeeperWithNamespace(namespaceInfo);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
new file mode 100644
index 0000000..fd3b7da
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.security.SecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class SetupPermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  public SetupPermissions(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // give all table permissions to the creator
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    if (!tableInfo.user.equals(env.getCredentials().getPrincipal())) {
+      for (TablePermission permission : TablePermission.values()) {
+        try {
+          security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
+        } catch (ThriftSecurityException e) {
+          LoggerFactory.getLogger(FinishCreateTable.class).error("{}", e.getMessage(), e);
+          throw e;
+        }
+      }
+    }
+
+    // setup permissions in zookeeper before table info in zookeeper
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    return new PopulateZookeeper(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
new file mode 100644
index 0000000..e2057d1
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+import java.util.Map;
+
+class TableInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  String tableName;
+  String tableId;
+  String namespaceId;
+  char timeType;
+  String user;
+
+  public Map<String,String> props;
+
+  public String dir = null;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
index a9a923b..1d8b116 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
@@ -30,51 +30,6 @@ import org.apache.accumulo.server.master.state.MergeInfo.Operation;
 import org.apache.accumulo.server.master.state.MergeState;
 import org.apache.hadoop.io.Text;
 
-/**
- * Merge makes things hard.
- *
- * Typically, a client will read the list of tablets, and begin an operation on that tablet at the location listed in the metadata table. When a tablet splits,
- * the information read from the metadata table doesn't match reality, so the operation fails, and must be retried. But the operation will take place either on
- * the parent, or at a later time on the children. It won't take place on just half of the tablet.
- *
- * However, when a merge occurs, the operation may have succeeded on one section of the merged area, and not on the others, when the merge occurs. There is no
- * way to retry the request at a later time on an unmodified tablet.
- *
- * The code below uses read-write lock to prevent some operations while a merge is taking place. Normal operations, like bulk imports, will grab the read lock
- * and prevent merges (writes) while they run. Merge operations will lock out some operations while they run.
- */
-class TableRangeOpWait extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private String tableId;
-
-  public TableRangeOpWait(String tableId) {
-    this.tableId = tableId;
-  }
-
-  @Override
-  public long isReady(long tid, Master env) throws Exception {
-    Text tableIdText = new Text(tableId);
-    if (!env.getMergeInfo(tableIdText).getState().equals(MergeState.NONE)) {
-      return 50;
-    }
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    String namespaceId = Tables.getNamespaceId(master.getInstance(), tableId);
-    Text tableIdText = new Text(tableId);
-    MergeInfo mergeInfo = master.getMergeInfo(tableIdText);
-    log.info("removing merge information " + mergeInfo);
-    master.clearMergeState(tableIdText);
-    Utils.unreserveNamespace(namespaceId, tid, false);
-    Utils.unreserveTable(tableId, tid, true);
-    return null;
-  }
-
-}
-
 public class TableRangeOp extends MasterRepo {
 
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
new file mode 100644
index 0000000..bdab469
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.master.state.MergeInfo;
+import org.apache.accumulo.server.master.state.MergeState;
+import org.apache.hadoop.io.Text;
+
+/**
+ * Merge makes things hard.
+ *
+ * Typically, a client will read the list of tablets and begin an operation on each tablet at the location listed in the metadata table. When a tablet splits,
+ * the information read from the metadata table no longer matches reality, so the operation fails and must be retried. But the operation will take place either on
+ * the parent, or at a later time on the children. It won't take place on just half of the tablet.
+ *
+ * However, when a merge occurs, the operation may have succeeded on one section of the merged area and not on the others. There is no
+ * way to retry the request at a later time on an unmodified tablet.
+ *
+ * The code below uses a read-write lock to prevent some operations while a merge is taking place. Normal operations, like bulk imports, grab the read lock
+ * and prevent merges (writes) while they run. Merge operations lock out some operations while they run.
+ */
+class TableRangeOpWait extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private String tableId;
+
+  public TableRangeOpWait(String tableId) {
+    this.tableId = tableId;
+  }
+
+  @Override
+  public long isReady(long tid, Master env) throws Exception {
+    Text tableIdText = new Text(tableId);
+    if (!env.getMergeInfo(tableIdText).getState().equals(MergeState.NONE)) {
+      return 50;
+    }
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    String namespaceId = Tables.getNamespaceId(master.getInstance(), tableId);
+    Text tableIdText = new Text(tableId);
+    MergeInfo mergeInfo = master.getMergeInfo(tableIdText);
+    log.info("removing merge information " + mergeInfo);
+    master.clearMergeState(tableIdText);
+    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(tableId, tid, true);
+    return null;
+  }
+
+}
\ No newline at end of file
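
For illustration only, the locking discipline the javadoc above describes, modeled with an in-process lock; Accumulo's actual reservations are kept in ZooKeeper (see Utils.reserveTable and Utils.unreserveTable in this package):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TableRangeLockSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  // Normal operations (e.g. bulk import) share the read lock and exclude merges.
  public void runNormalOp(Runnable op) {
    lock.readLock().lock();
    try {
      op.run();
    } finally {
      lock.readLock().unlock();
    }
  }

  // A merge takes the write lock, excluding all read-locked operations.
  public void runMerge(Runnable merge) {
    lock.writeLock().lock();
    try {
      merge.run();
    } finally {
      lock.writeLock().unlock();
    }
  }
}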

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
new file mode 100644
index 0000000..ca31d48
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedOutputStream;
+import java.io.BufferedWriter;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipOutputStream;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.conf.TableConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+
+class WriteExportFiles extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private final ExportInfo tableInfo;
+
+  WriteExportFiles(ExportInfo tableInfo) {
+    this.tableInfo = tableInfo;
+  }
+
+  private void checkOffline(Connector conn) throws Exception {
+    if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
+      Tables.clearCache(conn.getInstance());
+      if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
+        throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
+            "Table is not offline");
+      }
+    }
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+
+    long reserved = Utils.reserveNamespace(tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT)
+        + Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
+    if (reserved > 0)
+      return reserved;
+
+    Connector conn = master.getConnector();
+
+    checkOffline(conn);
+
+    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
+
+    // scan for locations
+    metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
+
+    if (metaScanner.iterator().hasNext()) {
+      return 500;
+    }
+
+    // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
+    // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
+    metaScanner.clearColumns();
+    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
+
+    if (metaScanner.iterator().hasNext()) {
+      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
+          "Write ahead logs found for table");
+    }
+
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    try {
+      exportTable(master.getFileSystem(), master, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
+    } catch (IOException ioe) {
+      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
+          "Failed to create export files " + ioe.getMessage());
+    }
+    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
+    Utils.unreserveTable(tableInfo.tableID, tid, false);
+    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
+    Utils.unreserveTable(tableInfo.tableID, tid, false);
+  }
+
+  public static void exportTable(VolumeManager fs, AccumuloServerContext context, String tableName, String tableID, String exportDir) throws Exception {
+
+    fs.mkdirs(new Path(exportDir));
+    Path exportMetaFilePath = fs.getVolumeByPath(new Path(exportDir)).getFileSystem().makeQualified(new Path(exportDir, Constants.EXPORT_FILE));
+
+    FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false);
+    ZipOutputStream zipOut = new ZipOutputStream(fileOut);
+    BufferedOutputStream bufOut = new BufferedOutputStream(zipOut);
+    DataOutputStream dataOut = new DataOutputStream(bufOut);
+
+    try {
+
+      zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
+      OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
+      osw.append(ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n");
+      osw.append("srcInstanceName:" + context.getInstance().getInstanceName() + "\n");
+      osw.append("srcInstanceID:" + context.getInstance().getInstanceID() + "\n");
+      osw.append("srcZookeepers:" + context.getInstance().getZooKeepers() + "\n");
+      osw.append("srcTableName:" + tableName + "\n");
+      osw.append("srcTableID:" + tableID + "\n");
+      osw.append(ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n");
+      osw.append("srcCodeVersion:" + Constants.VERSION + "\n");
+
+      osw.flush();
+      dataOut.flush();
+
+      exportConfig(context, tableID, zipOut, dataOut);
+      dataOut.flush();
+
+      Map<String,String> uniqueFiles = exportMetadata(fs, context, tableID, zipOut, dataOut);
+
+      dataOut.close();
+      dataOut = null;
+
+      createDistcpFile(fs, exportDir, exportMetaFilePath, uniqueFiles);
+
+    } finally {
+      if (dataOut != null)
+        dataOut.close();
+    }
+  }
+
+  private static void createDistcpFile(VolumeManager fs, String exportDir, Path exportMetaFilePath, Map<String,String> uniqueFiles) throws IOException {
+    BufferedWriter distcpOut = new BufferedWriter(new OutputStreamWriter(fs.create(new Path(exportDir, "distcp.txt"), false), UTF_8));
+
+    try {
+      for (String file : uniqueFiles.values()) {
+        distcpOut.append(file);
+        distcpOut.newLine();
+      }
+
+      distcpOut.append(exportMetaFilePath.toString());
+      distcpOut.newLine();
+
+      distcpOut.close();
+      distcpOut = null;
+
+    } finally {
+      if (distcpOut != null)
+        distcpOut.close();
+    }
+  }
+
+  private static Map<String,String> exportMetadata(VolumeManager fs, AccumuloServerContext context, String tableID, ZipOutputStream zipOut,
+      DataOutputStream dataOut) throws IOException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
+    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
+
+    Map<String,String> uniqueFiles = new HashMap<String,String>();
+
+    Scanner metaScanner = context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
+    metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
+
+    for (Entry<Key,Value> entry : metaScanner) {
+      entry.getKey().write(dataOut);
+      entry.getValue().write(dataOut);
+
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+        String path = fs.getFullPath(entry.getKey()).toString();
+        String[] tokens = path.split("/");
+        if (tokens.length < 1) {
+          throw new RuntimeException("Illegal path " + path);
+        }
+
+        String filename = tokens[tokens.length - 1];
+
+        String existingPath = uniqueFiles.get(filename);
+        if (existingPath == null) {
+          uniqueFiles.put(filename, path);
+        } else if (!existingPath.equals(path)) {
+          // make sure file names are unique, should only apply for tables with file names generated by Accumulo 1.3 and earlier
+          throw new IOException("Cannot export table with nonunique file names " + filename + ". Major compact table.");
+        }
+
+      }
+    }
+    return uniqueFiles;
+  }
+
+  private static void exportConfig(AccumuloServerContext context, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
+      AccumuloSecurityException, TableNotFoundException, IOException {
+    Connector conn = context.getConnector();
+
+    DefaultConfiguration defaultConfig = AccumuloConfiguration.getDefaultConfiguration();
+    Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
+    Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
+
+    TableConfiguration tableConfig = context.getServerConfigurationFactory().getTableConfiguration(tableID);
+
+    OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
+
+    // only put props that are different than defaults and higher level configurations
+    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
+    for (Entry<String,String> prop : tableConfig) {
+      if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
+        Property key = Property.getPropertyByKey(prop.getKey());
+
+        if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
+          if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
+            osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
+          }
+        }
+      }
+    }
+
+    osw.flush();
+  }
+}
\ No newline at end of file
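
exportTable above packs the info, table-config, and metadata entries (Constants.EXPORT_INFO_FILE, EXPORT_TABLE_CONFIG_FILE, EXPORT_METADATA_FILE) into a single zip, and createDistcpFile writes a distcp.txt beside it listing every file to copy; PopulateMetadataTable earlier in this patch reads the metadata entry back with a ZipInputStream. For quick manual inspection of an export, a self-contained entry lister (JDK only):

import java.io.FileInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

public class ExportZipLister {
  public static void main(String[] args) throws Exception {
    // args[0] is a local copy of the export metadata zip
    try (ZipInputStream zis = new ZipInputStream(new FileInputStream(args[0]))) {
      ZipEntry entry;
      while ((entry = zis.getNextEntry()) != null) {
        System.out.println(entry.getName());
      }
    }
  }
}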

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
index 80feb47..2d3a0a1 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
@@ -17,11 +17,8 @@
 package org.apache.accumulo.tserver;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -50,7 +47,6 @@ import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SkippingIterator;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.SortedMapIterator;
 import org.apache.accumulo.core.iterators.WrappingIterator;
@@ -72,121 +68,6 @@ import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-class MemKeyComparator implements Comparator<Key>, Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public int compare(Key k1, Key k2) {
-    int cmp = k1.compareTo(k2);
-
-    if (cmp == 0) {
-      if (k1 instanceof MemKey)
-        if (k2 instanceof MemKey)
-          cmp = ((MemKey) k2).kvCount - ((MemKey) k1).kvCount;
-        else
-          cmp = 1;
-      else if (k2 instanceof MemKey)
-        cmp = -1;
-    }
-
-    return cmp;
-  }
-}
-
-class PartialMutationSkippingIterator extends SkippingIterator implements InterruptibleIterator {
-
-  private int kvCount;
-
-  public PartialMutationSkippingIterator(SortedKeyValueIterator<Key,Value> source, int maxKVCount) {
-    setSource(source);
-    this.kvCount = maxKVCount;
-  }
-
-  @Override
-  protected void consume() throws IOException {
-    while (getSource().hasTop() && ((MemKey) getSource().getTopKey()).kvCount > kvCount)
-      getSource().next();
-  }
-
-  @Override
-  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
-    return new PartialMutationSkippingIterator(getSource().deepCopy(env), kvCount);
-  }
-
-  @Override
-  public void setInterruptFlag(AtomicBoolean flag) {
-    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
-  }
-
-}
-
-class MemKeyConversionIterator extends WrappingIterator implements InterruptibleIterator {
-  private MemKey currKey = null;
-  private Value currVal = null;
-
-  public MemKeyConversionIterator(SortedKeyValueIterator<Key,Value> source) {
-    super();
-    setSource(source);
-  }
-
-  @Override
-  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
-    return new MemKeyConversionIterator(getSource().deepCopy(env));
-  }
-
-  @Override
-  public Key getTopKey() {
-    return currKey;
-  }
-
-  @Override
-  public Value getTopValue() {
-    return currVal;
-  }
-
-  private void getTopKeyVal() {
-    Key k = super.getTopKey();
-    Value v = super.getTopValue();
-    if (k instanceof MemKey || k == null) {
-      currKey = (MemKey) k;
-      currVal = v;
-      return;
-    }
-    currVal = new Value(v);
-    int mc = MemValue.splitKVCount(currVal);
-    currKey = new MemKey(k, mc);
-
-  }
-
-  @Override
-  public void next() throws IOException {
-    super.next();
-    if (hasTop())
-      getTopKeyVal();
-  }
-
-  @Override
-  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
-    super.seek(range, columnFamilies, inclusive);
-
-    if (hasTop())
-      getTopKeyVal();
-
-    Key k = range.getStartKey();
-    if (k instanceof MemKey && hasTop()) {
-      while (hasTop() && currKey.compareTo(k) < 0)
-        next();
-    }
-  }
-
-  @Override
-  public void setInterruptFlag(AtomicBoolean flag) {
-    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
-  }
-
-}
-
 public class InMemoryMap {
   private SimpleMap map = null;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java
new file mode 100644
index 0000000..6c8b0f3
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import org.apache.accumulo.core.data.Key;
+
+class MemKeyComparator implements Comparator<Key>, Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public int compare(Key k1, Key k2) {
+    int cmp = k1.compareTo(k2);
+
+    if (cmp == 0) {
+      if (k1 instanceof MemKey)
+        if (k2 instanceof MemKey)
+          cmp = ((MemKey) k2).kvCount - ((MemKey) k1).kvCount;
+        else
+          cmp = 1;
+      else if (k2 instanceof MemKey)
+        cmp = -1;
+    }
+
+    return cmp;
+  }
+}
\ No newline at end of file
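
A hypothetical check of the comparator's tie-break, assuming the package-private MemKey(Key, int) constructor that MemKeyConversionIterator (next file) uses, so the class must sit in org.apache.accumulo.tserver:

package org.apache.accumulo.tserver;

import org.apache.accumulo.core.data.Key;
import org.apache.hadoop.io.Text;

public class MemKeyOrderCheck {
  public static void main(String[] args) {
    Key k = new Key(new Text("row"), new Text("cf"), new Text("cq"));
    MemKey older = new MemKey(k, 5); // written earlier
    MemKey newer = new MemKey(k, 9); // written later
    // Identical key fields, so the comparator falls through to kvCount,
    // descending: the most recently written version sorts first.
    System.out.println(new MemKeyComparator().compare(newer, older) < 0); // prints true
  }
}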

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java
new file mode 100644
index 0000000..891a0ba
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.WrappingIterator;
+import org.apache.accumulo.core.iterators.system.InterruptibleIterator;
+
+class MemKeyConversionIterator extends WrappingIterator implements InterruptibleIterator {
+  private MemKey currKey = null;
+  private Value currVal = null;
+
+  public MemKeyConversionIterator(SortedKeyValueIterator<Key,Value> source) {
+    super();
+    setSource(source);
+  }
+
+  @Override
+  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+    return new MemKeyConversionIterator(getSource().deepCopy(env));
+  }
+
+  @Override
+  public Key getTopKey() {
+    return currKey;
+  }
+
+  @Override
+  public Value getTopValue() {
+    return currVal;
+  }
+
+  private void getTopKeyVal() {
+    Key k = super.getTopKey();
+    Value v = super.getTopValue();
+    if (k instanceof MemKey || k == null) {
+      currKey = (MemKey) k;
+      currVal = v;
+      return;
+    }
+    // strip the kvCount that was encoded into the value and carry it on the key instead
+    currVal = new Value(v);
+    int mc = MemValue.splitKVCount(currVal);
+    currKey = new MemKey(k, mc);
+  }
+
+  @Override
+  public void next() throws IOException {
+    super.next();
+    if (hasTop())
+      getTopKeyVal();
+  }
+
+  @Override
+  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+    super.seek(range, columnFamilies, inclusive);
+
+    if (hasTop())
+      getTopKeyVal();
+
+    // the underlying seek ignores the kvCount portion of a MemKey start key,
+    // so advance until the converted key is at or past the requested start
+    Key k = range.getStartKey();
+    if (k instanceof MemKey && hasTop()) {
+      while (hasTop() && currKey.compareTo(k) < 0)
+        next();
+    }
+  }
+
+  @Override
+  public void setInterruptFlag(AtomicBoolean flag) {
+    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
+  }
+
+}
\ No newline at end of file

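The iterator above never materializes the converted stream; it rewrites only the current top entry each time the source advances. The same convert-on-advance pattern reduced to plain java.util iterators (class and method names here are hypothetical):

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.function.Function;

    // Hypothetical analogue of the convert-on-advance pattern: the wrapper
    // holds only the converted "top" element and recomputes it as the
    // source moves, so nothing is buffered.
    public class ConvertingIterator<S, T> implements Iterator<T> {
      private final Iterator<S> source;
      private final Function<S, T> convert;

      public ConvertingIterator(Iterator<S> source, Function<S, T> convert) {
        this.source = source;
        this.convert = convert;
      }

      @Override
      public boolean hasNext() {
        return source.hasNext();
      }

      @Override
      public T next() {
        return convert.apply(source.next());
      }

      public static void main(String[] args) {
        Iterator<Integer> it = new ConvertingIterator<>(
            Arrays.asList("1", "2", "3").iterator(), Integer::parseInt);
        while (it.hasNext())
          System.out.println(it.next()); // 1, 2, 3
      }
    }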
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java
new file mode 100644
index 0000000..8e2f113
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.SkippingIterator;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.system.InterruptibleIterator;
+
+class PartialMutationSkippingIterator extends SkippingIterator implements InterruptibleIterator {
+
+  private int kvCount;
+
+  public PartialMutationSkippingIterator(SortedKeyValueIterator<Key,Value> source, int maxKVCount) {
+    setSource(source);
+    this.kvCount = maxKVCount;
+  }
+
+  @Override
+  protected void consume() throws IOException {
+    // skip entries written after the cutoff, i.e. entries belonging to
+    // mutations that were only partially written when the scan started
+    while (getSource().hasTop() && ((MemKey) getSource().getTopKey()).kvCount > kvCount)
+      getSource().next();
+  }
+
+  @Override
+  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+    return new PartialMutationSkippingIterator(getSource().deepCopy(env), kvCount);
+  }
+
+  @Override
+  public void setInterruptFlag(AtomicBoolean flag) {
+    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
+  }
+
+}
\ No newline at end of file

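The consume() hook is the whole trick: whenever the iterator is repositioned, the source is advanced past entries whose kvCount exceeds the cutoff, hiding mutations that were only partially written. A runnable sketch of that skip loop over plain ints (the cutoff and data are made up):

    import java.util.Arrays;
    import java.util.Iterator;

    // Hypothetical sketch of the consume() loop: treat each int as an
    // entry's kvCount and skip anything written after the cutoff.
    public class SkipNewerDemo {
      public static void main(String[] args) {
        int maxKvCount = 2; // plays the role of the constructor's maxKVCount
        Iterator<Integer> source = Arrays.asList(1, 3, 2, 4).iterator();

        while (source.hasNext()) {
          int kvCount = source.next();
          if (kvCount > maxKvCount)
            continue; // consume(): skip entries beyond the cutoff
          System.out.println(kvCount); // prints 1 then 2
        }
      }
    }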
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
index 668b9cc..fb3c8a0 100644
--- a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
+++ b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
@@ -16,323 +16,6 @@
  */
 package org.apache.accumulo.test;
 
-import java.util.Collections;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.tserver.InMemoryMap;
-import org.apache.hadoop.io.Text;
-
-abstract class MemoryUsageTest {
-  abstract void addEntry(int i);
-
-  abstract int getEstimatedBytesPerEntry();
-
-  abstract void clear();
-
-  abstract int getNumPasses();
-
-  abstract String getName();
-
-  abstract void init();
-
-  public void run() {
-    System.gc();
-    long usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
-    int count = 0;
-    while (usedMem > 1024 * 1024 && count < 10) {
-      System.gc();
-      usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
-      count++;
-    }
-
-    init();
-
-    for (int i = 0; i < getNumPasses(); i++) {
-      addEntry(i);
-    }
-
-    System.gc();
-
-    long memSize = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) - usedMem;
-
-    double actualBytesPerEntry = memSize / (double) getNumPasses();
-    double expectedBytesPerEntry = getEstimatedBytesPerEntry();
-    double diff = actualBytesPerEntry - expectedBytesPerEntry;
-    double ratio = actualBytesPerEntry / expectedBytesPerEntry * 100;
-
-    System.out.printf("%30s | %,10d | %6.2fGB | %6.2f | %6.2f | %6.2f | %6.2f%s%n", getName(), getNumPasses(), memSize / (1024 * 1024 * 1024.0),
-        actualBytesPerEntry, expectedBytesPerEntry, diff, ratio, "%");
-
-    clear();
-
-  }
-
-}
-
-class TextMemoryUsageTest extends MemoryUsageTest {
-
-  private int keyLen;
-  private int colFamLen;
-  private int colQualLen;
-  private int dataLen;
-  private TreeMap<Text,Value> map;
-  private int passes;
-
-  TextMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
-    this.keyLen = keyLen;
-    this.colFamLen = colFamLen;
-    this.colQualLen = colQualLen;
-    this.dataLen = dataLen;
-    this.passes = passes;
-
-  }
-
-  @Override
-  void init() {
-    map = new TreeMap<Text,Value>();
-  }
-
-  @Override
-  public void addEntry(int i) {
-    Text key = new Text(String.format("%0" + keyLen + "d:%0" + colFamLen + "d:%0" + colQualLen + "d", i, 0, 0).getBytes());
-    //
-    byte data[] = new byte[dataLen];
-    for (int j = 0; j < data.length; j++) {
-      data[j] = (byte) (j % 10 + 65);
-    }
-    Value value = new Value(data);
-
-    map.put(key, value);
-
-  }
-
-  @Override
-  public void clear() {
-    map.clear();
-    map = null;
-  }
-
-  @Override
-  public int getEstimatedBytesPerEntry() {
-    return keyLen + colFamLen + colQualLen + dataLen;
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-  @Override
-  String getName() {
-    return "Text " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
-  }
-
-}
-
-class InMemoryMapMemoryUsageTest extends MemoryUsageTest {
-
-  private int keyLen;
-  private int colFamLen;
-  private int colQualLen;
-  private int colVisLen;
-  private int dataLen;
-
-  private InMemoryMap imm;
-  private Text key;
-  private Text colf;
-  private Text colq;
-  private ColumnVisibility colv;
-  private int passes;
-
-  InMemoryMapMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int colVisLen, int dataLen) {
-    this.keyLen = keyLen;
-    this.colFamLen = colFamLen;
-    this.colQualLen = colQualLen;
-    this.dataLen = dataLen;
-    this.passes = passes;
-    this.colVisLen = colVisLen;
-
-  }
-
-  @Override
-  void init() {
-    imm = new InMemoryMap(false, "/tmp");
-    key = new Text();
-
-    colf = new Text(String.format("%0" + colFamLen + "d", 0));
-    colq = new Text(String.format("%0" + colQualLen + "d", 0));
-    colv = new ColumnVisibility(String.format("%0" + colVisLen + "d", 0));
-  }
-
-  @Override
-  public void addEntry(int i) {
-    key.set(String.format("%0" + keyLen + "d", i));
-
-    Mutation m = new Mutation(key);
-
-    byte data[] = new byte[dataLen];
-    for (int j = 0; j < data.length; j++) {
-      data[j] = (byte) (j % 10 + 65);
-    }
-    Value idata = new Value(data);
-
-    m.put(colf, colq, colv, idata);
-
-    imm.mutate(Collections.singletonList(m));
-
-  }
-
-  @Override
-  public int getEstimatedBytesPerEntry() {
-    return keyLen + colFamLen + colQualLen + dataLen + 4 + colVisLen;
-  }
-
-  @Override
-  public void clear() {
-    imm = null;
-    key = null;
-    colf = null;
-    colq = null;
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-  @Override
-  String getName() {
-    return "IMM " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
-  }
-}
-
-class MutationMemoryUsageTest extends MemoryUsageTest {
-
-  private int keyLen;
-  private int colFamLen;
-  private int colQualLen;
-  private int dataLen;
-
-  private Mutation[] mutations;
-  private Text key;
-  private Text colf;
-  private Text colq;
-  private int passes;
-
-  MutationMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
-    this.keyLen = keyLen;
-    this.colFamLen = colFamLen;
-    this.colQualLen = colQualLen;
-    this.dataLen = dataLen;
-    this.passes = passes;
-    mutations = new Mutation[passes];
-
-  }
-
-  @Override
-  void init() {
-    key = new Text();
-
-    colf = new Text(String.format("%0" + colFamLen + "d", 0));
-    colq = new Text(String.format("%0" + colQualLen + "d", 0));
-
-    byte data[] = new byte[dataLen];
-    for (int i = 0; i < data.length; i++) {
-      data[i] = (byte) (i % 10 + 65);
-    }
-  }
-
-  @Override
-  public void addEntry(int i) {
-    key.set(String.format("%0" + keyLen + "d", i));
-
-    Mutation m = new Mutation(key);
-
-    byte data[] = new byte[dataLen];
-    for (int j = 0; j < data.length; j++) {
-      data[j] = (byte) (j % 10 + 65);
-    }
-    Value idata = new Value(data);
-
-    m.put(colf, colq, idata);
-
-    mutations[i] = m;
-  }
-
-  @Override
-  public int getEstimatedBytesPerEntry() {
-    return keyLen + colFamLen + colQualLen + dataLen;
-  }
-
-  @Override
-  public void clear() {
-    key = null;
-    colf = null;
-    colq = null;
-    mutations = null;
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-  @Override
-  String getName() {
-    return "Mutation " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
-  }
-}
-
-class IntObjectMemoryUsageTest extends MemoryUsageTest {
-
-  private int passes;
-  private Object data[];
-
-  static class SimpleObject {
-    int d;
-
-    SimpleObject(int d) {
-      this.d = d;
-    }
-  }
-
-  IntObjectMemoryUsageTest(int numPasses) {
-    this.passes = numPasses;
-  }
-
-  @Override
-  void init() {
-    data = new Object[passes];
-  }
-
-  @Override
-  void addEntry(int i) {
-    data[i] = new SimpleObject(i);
-
-  }
-
-  @Override
-  void clear() {}
-
-  @Override
-  int getEstimatedBytesPerEntry() {
-    return 4;
-  }
-
-  @Override
-  String getName() {
-    return "int obj";
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-}
 
 public class EstimateInMemMapOverhead {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java
new file mode 100644
index 0000000..f325524
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.tserver.InMemoryMap;
+import org.apache.hadoop.io.Text;
+
+class InMemoryMapMemoryUsageTest extends MemoryUsageTest {
+
+  private int keyLen;
+  private int colFamLen;
+  private int colQualLen;
+  private int colVisLen;
+  private int dataLen;
+
+  private InMemoryMap imm;
+  private Text key;
+  private Text colf;
+  private Text colq;
+  private ColumnVisibility colv;
+  private int passes;
+
+  InMemoryMapMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int colVisLen, int dataLen) {
+    this.keyLen = keyLen;
+    this.colFamLen = colFamLen;
+    this.colQualLen = colQualLen;
+    this.dataLen = dataLen;
+    this.passes = passes;
+    this.colVisLen = colVisLen;
+
+  }
+
+  @Override
+  void init() {
+    imm = new InMemoryMap(false, "/tmp");
+    key = new Text();
+
+    colf = new Text(String.format("%0" + colFamLen + "d", 0));
+    colq = new Text(String.format("%0" + colQualLen + "d", 0));
+    colv = new ColumnVisibility(String.format("%0" + colVisLen + "d", 0));
+  }
+
+  @Override
+  public void addEntry(int i) {
+    key.set(String.format("%0" + keyLen + "d", i));
+
+    Mutation m = new Mutation(key);
+
+    byte data[] = new byte[dataLen];
+    for (int j = 0; j < data.length; j++) {
+      data[j] = (byte) (j % 10 + 65);
+    }
+    Value idata = new Value(data);
+
+    m.put(colf, colq, colv, idata);
+
+    imm.mutate(Collections.singletonList(m));
+
+  }
+
+  @Override
+  public int getEstimatedBytesPerEntry() {
+    return keyLen + colFamLen + colQualLen + dataLen + 4 + colVisLen;
+  }
+
+  @Override
+  public void clear() {
+    imm = null;
+    key = null;
+    colf = null;
+    colq = null;
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+  @Override
+  String getName() {
+    return "IMM " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java
new file mode 100644
index 0000000..d83421a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+class IntObjectMemoryUsageTest extends MemoryUsageTest {
+
+  private int passes;
+  private Object data[];
+
+  static class SimpleObject {
+    int d;
+
+    SimpleObject(int d) {
+      this.d = d;
+    }
+  }
+
+  IntObjectMemoryUsageTest(int numPasses) {
+    this.passes = numPasses;
+  }
+
+  @Override
+  void init() {
+    data = new Object[passes];
+  }
+
+  @Override
+  void addEntry(int i) {
+    data[i] = new SimpleObject(i);
+
+  }
+
+  @Override
+  void clear() {}
+
+  @Override
+  int getEstimatedBytesPerEntry() {
+    return 4;
+  }
+
+  @Override
+  String getName() {
+    return "int obj";
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java
new file mode 100644
index 0000000..39e8d68
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+abstract class MemoryUsageTest {
+  abstract void addEntry(int i);
+
+  abstract int getEstimatedBytesPerEntry();
+
+  abstract void clear();
+
+  abstract int getNumPasses();
+
+  abstract String getName();
+
+  abstract void init();
+
+  public void run() {
+    System.gc();
+    long usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
+    int count = 0;
+    while (usedMem > 1024 * 1024 && count < 10) {
+      System.gc();
+      usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
+      count++;
+    }
+
+    init();
+
+    for (int i = 0; i < getNumPasses(); i++) {
+      addEntry(i);
+    }
+
+    System.gc();
+
+    long memSize = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) - usedMem;
+
+    double actualBytesPerEntry = memSize / (double) getNumPasses();
+    double expectedBytesPerEntry = getEstimatedBytesPerEntry();
+    double diff = actualBytesPerEntry - expectedBytesPerEntry;
+    double ratio = actualBytesPerEntry / expectedBytesPerEntry * 100;
+
+    System.out.printf("%30s | %,10d | %6.2fGB | %6.2f | %6.2f | %6.2f | %6.2f%s%n", getName(), getNumPasses(), memSize / (1024 * 1024 * 1024.0),
+        actualBytesPerEntry, expectedBytesPerEntry, diff, ratio, "%");
+
+    clear();
+
+  }
+
+}
\ No newline at end of file

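run() is a settle-then-measure harness: force collections until the heap stops shrinking (or ten attempts pass), add the entries, collect again, and attribute the heap delta evenly across entries. The same idea condensed into a standalone program (the pass count and long[4] payload are arbitrary):

    // Minimal heap-delta harness in the style of MemoryUsageTest.run():
    // settle the heap, populate, re-measure, attribute the delta per entry.
    public class HeapDeltaDemo {

      static long settledUsage(Runtime rt) {
        long used = rt.totalMemory() - rt.freeMemory();
        int count = 0;
        while (used > 1024 * 1024 && count < 10) {
          System.gc();
          used = rt.totalMemory() - rt.freeMemory();
          count++;
        }
        return used;
      }

      public static void main(String[] args) {
        Runtime rt = Runtime.getRuntime();
        System.gc();
        long before = settledUsage(rt);

        int passes = 100000;
        long[][] entries = new long[passes][];
        for (int i = 0; i < passes; i++)
          entries[i] = new long[4]; // ~32 bytes of payload per entry

        System.gc();
        long delta = (rt.totalMemory() - rt.freeMemory()) - before;
        System.out.printf("%.2f bytes/entry (payload 32)%n", delta / (double) passes);

        // keep 'entries' reachable so the collector cannot reclaim it early
        if (entries[passes - 1] == null)
          throw new AssertionError();
      }
    }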
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java
new file mode 100644
index 0000000..011fbfe
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+
+class MutationMemoryUsageTest extends MemoryUsageTest {
+
+  private int keyLen;
+  private int colFamLen;
+  private int colQualLen;
+  private int dataLen;
+
+  private Mutation[] mutations;
+  private Text key;
+  private Text colf;
+  private Text colq;
+  private int passes;
+
+  MutationMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
+    this.keyLen = keyLen;
+    this.colFamLen = colFamLen;
+    this.colQualLen = colQualLen;
+    this.dataLen = dataLen;
+    this.passes = passes;
+    mutations = new Mutation[passes];
+
+  }
+
+  @Override
+  void init() {
+    key = new Text();
+
+    colf = new Text(String.format("%0" + colFamLen + "d", 0));
+    colq = new Text(String.format("%0" + colQualLen + "d", 0));
+
+    byte data[] = new byte[dataLen];
+    for (int i = 0; i < data.length; i++) {
+      data[i] = (byte) (i % 10 + 65);
+    }
+  }
+
+  @Override
+  public void addEntry(int i) {
+    key.set(String.format("%0" + keyLen + "d", i));
+
+    Mutation m = new Mutation(key);
+
+    byte data[] = new byte[dataLen];
+    for (int j = 0; j < data.length; j++) {
+      data[j] = (byte) (j % 10 + 65);
+    }
+    Value idata = new Value(data);
+
+    m.put(colf, colq, idata);
+
+    mutations[i] = m;
+  }
+
+  @Override
+  public int getEstimatedBytesPerEntry() {
+    return keyLen + colFamLen + colQualLen + dataLen;
+  }
+
+  @Override
+  public void clear() {
+    key = null;
+    colf = null;
+    colq = null;
+    mutations = null;
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+  @Override
+  String getName() {
+    return "Mutation " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java
new file mode 100644
index 0000000..14b8184
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+
+class TextMemoryUsageTest extends MemoryUsageTest {
+
+  private int keyLen;
+  private int colFamLen;
+  private int colQualLen;
+  private int dataLen;
+  private TreeMap<Text,Value> map;
+  private int passes;
+
+  TextMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
+    this.keyLen = keyLen;
+    this.colFamLen = colFamLen;
+    this.colQualLen = colQualLen;
+    this.dataLen = dataLen;
+    this.passes = passes;
+
+  }
+
+  @Override
+  void init() {
+    map = new TreeMap<Text,Value>();
+  }
+
+  @Override
+  public void addEntry(int i) {
+    Text key = new Text(String.format("%0" + keyLen + "d:%0" + colFamLen + "d:%0" + colQualLen + "d", i, 0, 0).getBytes());
+    byte data[] = new byte[dataLen];
+    for (int j = 0; j < data.length; j++) {
+      data[j] = (byte) (j % 10 + 65);
+    }
+    Value value = new Value(data);
+
+    map.put(key, value);
+
+  }
+
+  @Override
+  public void clear() {
+    map.clear();
+    map = null;
+  }
+
+  @Override
+  public int getEstimatedBytesPerEntry() {
+    return keyLen + colFamLen + colQualLen + dataLen;
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+  @Override
+  String getName() {
+    return "Text " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java b/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java
new file mode 100644
index 0000000..f53a6a6
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.continuous;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+class HistData<T> implements Comparable<HistData<T>>, Serializable {
+  private static final long serialVersionUID = 1L;
+
+  T bin;
+  long count;
+
+  HistData(T bin) {
+    this.bin = bin;
+    count = 0;
+  }
+
+  @Override
+  public int hashCode() {
+    // equals() below compares only bin (via compareTo), so hashCode must not mix in count
+    return Objects.hashCode(bin);
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public boolean equals(Object obj) {
+    return obj == this || (obj instanceof HistData && 0 == compareTo((HistData<T>) obj));
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public int compareTo(HistData<T> o) {
+    return ((Comparable<T>) bin).compareTo(o.bin);
+  }
+}
\ No newline at end of file

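HistData's equals() delegates to compareTo(), which inspects only bin, so hashCode() must likewise depend only on bin; otherwise two equal entries could land in different hash buckets and break HashSet/HashMap semantics. A minimal demonstration of that contract, using a hypothetical Pair class:

    import java.util.HashSet;
    import java.util.Objects;
    import java.util.Set;

    // Hypothetical Pair mirroring HistData: identity is the bin alone, the
    // count is payload. equals() and hashCode() must agree on that choice.
    public class Pair {
      final String bin;
      final long count;

      Pair(String bin, long count) {
        this.bin = bin;
        this.count = count;
      }

      @Override
      public boolean equals(Object o) {
        return o instanceof Pair && ((Pair) o).bin.equals(bin); // bin only
      }

      @Override
      public int hashCode() {
        return Objects.hashCode(bin); // must be bin only as well
      }

      public static void main(String[] args) {
        Set<Pair> set = new HashSet<>();
        set.add(new Pair("a", 1));
        set.add(new Pair("a", 2)); // equal to the first element, so rejected
        System.out.println(set.size()); // 1
      }
    }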
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java b/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
index dd17f3d..8dd3c9d 100644
--- a/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
+++ b/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
@@ -29,39 +29,9 @@ import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Objects;
 import java.util.Set;
 import java.util.TreeSet;
 
-class HistData<T> implements Comparable<HistData<T>>, Serializable {
-  private static final long serialVersionUID = 1L;
-
-  T bin;
-  long count;
-
-  HistData(T bin) {
-    this.bin = bin;
-    count = 0;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hashCode(bin) + Objects.hashCode(count);
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public boolean equals(Object obj) {
-    return obj == this || (obj != null && obj instanceof HistData && 0 == compareTo((HistData<T>) obj));
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public int compareTo(HistData<T> o) {
-    return ((Comparable<T>) bin).compareTo(o.bin);
-  }
-}
-
 public class Histogram<T> implements Serializable {
 
   private static final long serialVersionUID = 1L;


[9/9] accumulo git commit: Merge branch '1.7'

Posted by ct...@apache.org.
Merge branch '1.7'


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a7de08b7
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a7de08b7
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a7de08b7

Branch: refs/heads/master
Commit: a7de08b7cadfbb328876993c281cad26f444f29e
Parents: 26d6691 6e2e678
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Apr 28 20:32:42 2015 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Apr 28 20:32:42 2015 -0400

----------------------------------------------------------------------
 .../core/client/impl/OfflineIterator.java       | 340 ++++++++++++
 .../core/client/impl/OfflineScanner.java        | 314 -----------
 .../core/compaction/CompactionSettings.java     |  42 --
 .../accumulo/core/compaction/PatternType.java   |  28 +
 .../accumulo/core/compaction/SizeType.java      |  30 ++
 .../accumulo/core/compaction/StringType.java    |  24 +
 .../apache/accumulo/core/compaction/Type.java   |  21 +
 .../accumulo/core/compaction/UIntType.java      |  27 +
 .../core/file/DispatchingFileFactory.java       | 136 +++++
 .../accumulo/core/file/FileOperations.java      | 106 ----
 .../accumulo/core/cli/TestClientOpts.java       |   5 +
 .../client/CountingVerifyingReceiver.java       |  64 +++
 .../simple/client/RandomBatchScanner.java       |  38 --
 pom.xml                                         |   1 +
 .../accumulo/master/tableOps/BulkImport.java    | 363 -------------
 .../master/tableOps/CancelCompactions.java      |  23 -
 .../accumulo/master/tableOps/ChooseDir.java     |  53 ++
 .../accumulo/master/tableOps/CleanUp.java       | 287 ++++++++++
 .../master/tableOps/CleanUpBulkImport.java      |  64 +++
 .../accumulo/master/tableOps/CloneInfo.java     |  36 ++
 .../accumulo/master/tableOps/CloneMetadata.java |  54 ++
 .../master/tableOps/ClonePermissions.java       |  73 +++
 .../accumulo/master/tableOps/CloneTable.java    | 195 -------
 .../master/tableOps/CloneZookeeper.java         |  76 +++
 .../accumulo/master/tableOps/CompactRange.java  | 159 ------
 .../master/tableOps/CompactionDriver.java       | 188 +++++++
 .../master/tableOps/CompleteBulkImport.java     |  45 ++
 .../accumulo/master/tableOps/CopyFailed.java    | 158 ++++++
 .../accumulo/master/tableOps/CreateDir.java     |  51 ++
 .../master/tableOps/CreateImportDir.java        |  61 +++
 .../master/tableOps/CreateNamespace.java        | 137 -----
 .../accumulo/master/tableOps/CreateTable.java   | 251 ---------
 .../master/tableOps/DeleteNamespace.java        |  55 --
 .../accumulo/master/tableOps/DeleteTable.java   | 265 ----------
 .../accumulo/master/tableOps/ExportInfo.java    |  29 ++
 .../accumulo/master/tableOps/ExportTable.java   | 257 ---------
 .../master/tableOps/FinishCancelCompaction.java |  40 ++
 .../master/tableOps/FinishCloneTable.java       |  64 +++
 .../master/tableOps/FinishCreateNamespace.java  |  58 +++
 .../master/tableOps/FinishCreateTable.java      |  62 +++
 .../master/tableOps/FinishImportTable.java      |  68 +++
 .../tableOps/ImportPopulateZookeeper.java       | 104 ++++
 .../master/tableOps/ImportSetupPermissions.java |  65 +++
 .../accumulo/master/tableOps/ImportTable.java   | 521 -------------------
 .../master/tableOps/ImportedTableInfo.java      |  31 ++
 .../accumulo/master/tableOps/LoadFiles.java     | 209 ++++++++
 .../master/tableOps/MapImportFileNames.java     | 111 ++++
 .../master/tableOps/MoveExportedFiles.java      |  71 +++
 .../master/tableOps/NamespaceCleanUp.java       |  75 +++
 .../accumulo/master/tableOps/NamespaceInfo.java |  31 ++
 .../master/tableOps/PopulateMetadata.java       |  54 ++
 .../master/tableOps/PopulateMetadataTable.java  | 217 ++++++++
 .../master/tableOps/PopulateZookeeper.java      |  77 +++
 .../PopulateZookeeperWithNamespace.java         |  74 +++
 .../tableOps/SetupNamespacePermissions.java     |  55 ++
 .../master/tableOps/SetupPermissions.java       |  63 +++
 .../accumulo/master/tableOps/TableInfo.java     |  35 ++
 .../accumulo/master/tableOps/TableRangeOp.java  |  45 --
 .../master/tableOps/TableRangeOpWait.java       |  69 +++
 .../master/tableOps/WriteExportFiles.java       | 268 ++++++++++
 .../apache/accumulo/tserver/InMemoryMap.java    | 119 -----
 .../accumulo/tserver/MemKeyComparator.java      |  44 ++
 .../tserver/MemKeyConversionIterator.java       |  96 ++++
 .../PartialMutationSkippingIterator.java        |  54 ++
 .../accumulo/test/EstimateInMemMapOverhead.java | 317 -----------
 .../test/InMemoryMapMemoryUsageTest.java        | 102 ++++
 .../accumulo/test/IntObjectMemoryUsageTest.java |  65 +++
 .../apache/accumulo/test/MemoryUsageTest.java   |  64 +++
 .../accumulo/test/MutationMemoryUsageTest.java  |  98 ++++
 .../accumulo/test/TextMemoryUsageTest.java      |  82 +++
 .../accumulo/test/continuous/HistData.java      |  49 ++
 .../accumulo/test/continuous/Histogram.java     |  30 --
 72 files changed, 4406 insertions(+), 3237 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a7de08b7/pom.xml
----------------------------------------------------------------------


[3/9] accumulo git commit: ACCUMULO-3759 Fix Java 8 compiler warnings

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
new file mode 100644
index 0000000..f696198
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.iterators.user.GrepIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.server.master.state.TabletState;
+import org.apache.accumulo.server.problems.ProblemReports;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CleanUp extends MasterRepo {
+
+  private static final Logger log = LoggerFactory.getLogger(CleanUp.class);
+
+  private static final long serialVersionUID = 1L;
+
+  private String tableId, namespaceId;
+
+  private long creationTime;
+
+  private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
+    in.defaultReadObject();
+
+    /*
+     * Handle the case where we resume executing on a new machine whose clock is behind that of the machine which created this repo.
+     *
+     * If the new machine's clock is ahead instead, hasCycled() still works correctly.
+     */
+    if (System.currentTimeMillis() < creationTime) {
+      creationTime = System.currentTimeMillis();
+    }
+
+  }
+
+  public CleanUp(String tableId, String namespaceId) {
+    this.tableId = tableId;
+    this.namespaceId = namespaceId;
+    creationTime = System.currentTimeMillis();
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    if (!master.hasCycled(creationTime)) {
+      return 50;
+    }
+
+    boolean done = true;
+    Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
+    Scanner scanner = master.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    MetaDataTableScanner.configureScanner(scanner, master);
+    scanner.setRange(tableRange);
+
+    for (Entry<Key,Value> entry : scanner) {
+      TabletLocationState locationState = MetaDataTableScanner.createTabletLocationState(entry.getKey(), entry.getValue());
+      TabletState state = locationState.getState(master.onlineTabletServers());
+      if (state.equals(TabletState.ASSIGNED) || state.equals(TabletState.HOSTED)) {
+        log.debug("Still waiting for table to be deleted: " + tableId + " locationState: " + locationState);
+        done = false;
+        break;
+      }
+    }
+
+    if (!done)
+      return 50;
+
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    master.clearMigrations(tableId);
+
+    int refCount = 0;
+
+    try {
+      // look for other tables that reference this table's files
+      Connector conn = master.getConnector();
+      BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
+      try {
+        Range allTables = MetadataSchema.TabletsSection.getRange();
+        Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
+        Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
+        Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
+        bs.setRanges(Arrays.asList(beforeTable, afterTable));
+        bs.fetchColumnFamily(DataFileColumnFamily.NAME);
+        IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
+        GrepIterator.setTerm(cfg, "/" + tableId + "/");
+        bs.addScanIterator(cfg);
+
+        for (Entry<Key,Value> entry : bs) {
+          if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
+            refCount++;
+          }
+        }
+      } finally {
+        bs.close();
+      }
+
+    } catch (Exception e) {
+      refCount = -1;
+      log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
+    }
+
+    // remove metadata table entries
+    try {
+      // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
+      // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
+      // are dropped and the operation completes, then the deletes will not be repeated.
+      MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
+    } catch (Exception e) {
+      log.error("error deleting " + tableId + " from metadata table", e);
+    }
+
+    // remove any problem reports the table may have
+    try {
+      ProblemReports.getInstance(master).deleteProblemReports(tableId);
+    } catch (Exception e) {
+      log.error("Failed to delete problem reports for table " + tableId, e);
+    }
+
+    if (refCount == 0) {
+      final AccumuloConfiguration conf = master.getConfiguration();
+      boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
+
+      // delete the map files
+      try {
+        VolumeManager fs = master.getFileSystem();
+        for (String dir : ServerConstants.getTablesDirs()) {
+          if (archiveFiles) {
+            archiveFile(fs, dir, tableId);
+          } else {
+            fs.deleteRecursively(new Path(dir, tableId));
+          }
+        }
+      } catch (IOException e) {
+        log.error("Unable to remove deleted table directory", e);
+      } catch (IllegalArgumentException exception) {
+        if (exception.getCause() instanceof UnknownHostException) {
+          /* Thrown if HDFS encounters a DNS problem in some edge cases */
+          log.error("Unable to remove deleted table directory", exception);
+        } else {
+          throw exception;
+        }
+      }
+    }
+
+    // remove table from zookeeper
+    try {
+      TableManager.getInstance().removeTable(tableId);
+      Tables.clearCache(master.getInstance());
+    } catch (Exception e) {
+      log.error("Failed to find table id in zookeeper", e);
+    }
+
+    // remove any permissions associated with this table
+    try {
+      AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
+    } catch (ThriftSecurityException e) {
+      log.error("{}", e.getMessage(), e);
+    }
+
+    Utils.unreserveTable(tableId, tid, true);
+    Utils.unreserveNamespace(namespaceId, tid, false);
+
+    log.debug("Deleted table " + tableId);
+
+    return null;
+  }
+
+  protected void archiveFile(VolumeManager fs, String dir, String tableId) throws IOException {
+    Path tableDirectory = new Path(dir, tableId);
+    Volume v = fs.getVolumeByPath(tableDirectory);
+    String basePath = v.getBasePath();
+
+    // Path component of URI
+    String tableDirPath = tableDirectory.toUri().getPath();
+
+    // Just the suffix of the path (after the Volume's base path)
+    String tableDirSuffix = tableDirPath.substring(basePath.length());
+
+    // Remove a leading path separator char because Path will treat the "child" as an absolute path with it
+    if (Path.SEPARATOR_CHAR == tableDirSuffix.charAt(0)) {
+      if (tableDirSuffix.length() > 1) {
+        tableDirSuffix = tableDirSuffix.substring(1);
+      } else {
+        tableDirSuffix = "";
+      }
+    }
+
+    // Get the file archive directory on this volume
+    final Path fileArchiveDir = new Path(basePath, ServerConstants.FILE_ARCHIVE_DIR);
+
+    // Make sure it exists just to be safe
+    fs.mkdirs(fileArchiveDir);
+
+    // The destination to archive this table to
+    final Path destTableDir = new Path(fileArchiveDir, tableDirSuffix);
+
+    log.debug("Archiving " + tableDirectory + " to " + tableDirectory);
+
+    if (fs.exists(destTableDir)) {
+      merge(fs, tableDirectory, destTableDir);
+    } else {
+      fs.rename(tableDirectory, destTableDir);
+    }
+  }
+
+  protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
+    for (FileStatus child : fs.listStatus(src)) {
+      final String childName = child.getPath().getName();
+      final Path childInSrc = new Path(src, childName), childInDest = new Path(dest, childName);
+
+      if (child.isFile()) {
+        if (fs.exists(childInDest)) {
+          log.warn("File already exists in archive, ignoring. " + childInDest);
+        } else {
+          fs.rename(childInSrc, childInDest);
+        }
+      } else if (child.isDirectory()) {
+        if (fs.exists(childInDest)) {
+          // Recurse
+          merge(fs, childInSrc, childInDest);
+        } else {
+          fs.rename(childInSrc, childInDest);
+        }
+      } else {
+        // Symlinks shouldn't exist in table directories, so warn and skip
+        log.warn("Ignoring archiving of non file/directory: " + child);
+      }
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    // nothing to do
+  }
+
+}
\ No newline at end of file

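merge() above implements a keep-existing recursive merge: a file already present under the archive is left alone, anything else is renamed in, and directories are merged recursively. The same policy sketched over a local filesystem with java.nio.file in place of VolumeManager (the paths in main are hypothetical):

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Keep-existing recursive merge: files already present in dest are left
    // alone, everything else is moved over; directories merge recursively.
    public class MergeDemo {
      static void merge(Path src, Path dest) throws IOException {
        try (DirectoryStream<Path> children = Files.newDirectoryStream(src)) {
          for (Path child : children) {
            Path target = dest.resolve(child.getFileName());
            if (Files.isDirectory(child)) {
              if (Files.exists(target)) {
                merge(child, target); // recurse into the existing directory
              } else {
                Files.move(child, target);
              }
            } else if (Files.exists(target)) {
              System.err.println("already archived, ignoring: " + target);
            } else {
              Files.move(child, target);
            }
          }
        }
      }

      public static void main(String[] args) throws IOException {
        // hypothetical source and archive locations
        merge(Paths.get("/tmp/src-table"), Paths.get("/tmp/archive/src-table"));
      }
    }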
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java
new file mode 100644
index 0000000..85f9a8c
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CleanUpBulkImport extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private static final Logger log = LoggerFactory.getLogger(CleanUpBulkImport.class);
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String error;
+
+  public CleanUpBulkImport(String tableId, String source, String bulk, String error) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.error = error;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    log.debug("removing the bulk processing flag file in " + bulk);
+    Path bulkDir = new Path(bulk);
+    MetadataTableUtil.removeBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
+    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
+    log.debug("removing the metadata table markers for loaded files");
+    Connector conn = master.getConnector();
+    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
+    log.debug("releasing HDFS reservations for " + source + " and " + error);
+    Utils.unreserveHdfsDirectory(source, tid);
+    Utils.unreserveHdfsDirectory(error, tid);
+    Utils.getReadLock(tableId, tid).unlock();
+    log.debug("completing bulk import transaction " + tid);
+    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
+    return null;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java
new file mode 100644
index 0000000..335d65d
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.Set;
+
+class CloneInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  String srcTableId;
+  String tableName;
+  String tableId;
+  String namespaceId;
+  String srcNamespaceId;
+  Map<String,String> propertiesToSet;
+  Set<String> propertiesToExclude;
+
+  public String user;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
new file mode 100644
index 0000000..045f6b1
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.slf4j.LoggerFactory;
+
+class CloneMetadata extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private CloneInfo cloneInfo;
+
+  public CloneMetadata(CloneInfo cloneInfo) {
+    this.cloneInfo = cloneInfo;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    LoggerFactory.getLogger(CloneMetadata.class).info(
+        String.format("Cloning %s with tableId %s from srcTableId %s", cloneInfo.tableName, cloneInfo.tableId, cloneInfo.srcTableId));
+    // need to clear out any metadata entries for tableId just in case this
+    // died before and is executing again
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
+    MetadataTableUtil.cloneTable(environment, cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem());
+    return new FinishCloneTable(cloneInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
new file mode 100644
index 0000000..3572c31
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.NamespaceNotFoundException;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class ClonePermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private CloneInfo cloneInfo;
+
+  public ClonePermissions(CloneInfo cloneInfo) {
+    this.cloneInfo = cloneInfo;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    // give all table permissions to the creator
+    for (TablePermission permission : TablePermission.values()) {
+      try {
+        AuditedSecurityOperation.getInstance(environment).grantTablePermission(environment.rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission,
+            cloneInfo.namespaceId);
+      } catch (ThriftSecurityException e) {
+        LoggerFactory.getLogger(FinishCloneTable.class).error("{}", e.getMessage(), e);
+        throw e;
+      }
+    }
+
+    // set up permissions in zookeeper before the table info in zookeeper;
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    try {
+      return new CloneZookeeper(cloneInfo);
+    } catch (NamespaceNotFoundException e) {
+      throw new ThriftTableOperationException(null, cloneInfo.tableName, TableOperation.CLONE, TableOperationExceptionType.NAMESPACE_NOTFOUND,
+          "Namespace for target table not found");
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    AuditedSecurityOperation.getInstance(environment).deleteTable(environment.rpcCreds(), cloneInfo.tableId, cloneInfo.namespaceId);
+  }
+}
\ No newline at end of file
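
The grant loop above has a client-side analogue in the public API; a rough sketch (instance name, zookeepers, credentials, user, and table below are placeholders):

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.admin.SecurityOperations;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.security.TablePermission;

public class GrantAllTablePermissions {
  public static void main(String[] args) throws Exception {
    Connector conn = new ZooKeeperInstance("instance", "zkhost:2181")
        .getConnector("root", new PasswordToken("secret"));
    SecurityOperations security = conn.securityOperations();
    // grant every table permission to the creator, as the Repo does
    for (TablePermission permission : TablePermission.values())
      security.grantTablePermission("someuser", "sometable", permission);
  }
}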

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
index 192d182..eb2370e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
@@ -16,209 +16,14 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.Serializable;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.accumulo.core.client.NamespaceNotFoundException;
-import org.apache.accumulo.core.client.impl.Namespaces;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.slf4j.LoggerFactory;
-
-class CloneInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  String srcTableId;
-  String tableName;
-  String tableId;
-  String namespaceId;
-  String srcNamespaceId;
-  Map<String,String> propertiesToSet;
-  Set<String> propertiesToExclude;
-
-  public String user;
-}
-
-class FinishCloneTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private CloneInfo cloneInfo;
-
-  public FinishCloneTable(CloneInfo cloneInfo) {
-    this.cloneInfo = cloneInfo;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    // directories are intentionally not created.... this is done because directories should be unique
-    // because they occupy a different namespace than normal tablet directories... also some clones
-    // may never create files.. therefore there is no need to consume namenode space w/ directories
-    // that are not used... tablet will create directories as needed
-
-    TableManager.getInstance().transitionTableState(cloneInfo.tableId, TableState.ONLINE);
-
-    Utils.unreserveNamespace(cloneInfo.srcNamespaceId, tid, false);
-    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
-    Utils.unreserveTable(cloneInfo.srcTableId, tid, false);
-    Utils.unreserveTable(cloneInfo.tableId, tid, true);
-
-    environment.getEventCoordinator().event("Cloned table %s from %s", cloneInfo.tableName, cloneInfo.srcTableId);
-
-    LoggerFactory.getLogger(FinishCloneTable.class).debug("Cloned table " + cloneInfo.srcTableId + " " + cloneInfo.tableId + " " + cloneInfo.tableName);
-
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {}
-
-}
-
-class CloneMetadata extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private CloneInfo cloneInfo;
-
-  public CloneMetadata(CloneInfo cloneInfo) {
-    this.cloneInfo = cloneInfo;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    LoggerFactory.getLogger(CloneMetadata.class).info(
-        String.format("Cloning %s with tableId %s from srcTableId %s", cloneInfo.tableName, cloneInfo.tableId, cloneInfo.srcTableId));
-    // need to clear out any metadata entries for tableId just in case this
-    // died before and is executing again
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
-    MetadataTableUtil.cloneTable(environment, cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem());
-    return new FinishCloneTable(cloneInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
-  }
-
-}
-
-class CloneZookeeper extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private CloneInfo cloneInfo;
-
-  public CloneZookeeper(CloneInfo cloneInfo) throws NamespaceNotFoundException {
-    this.cloneInfo = cloneInfo;
-    this.cloneInfo.namespaceId = Namespaces.getNamespaceId(HdfsZooInstance.getInstance(), Tables.qualify(this.cloneInfo.tableName).getFirst());
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    long val = 0;
-    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      val += Utils.reserveNamespace(cloneInfo.namespaceId, tid, false, true, TableOperation.CLONE);
-    val += Utils.reserveTable(cloneInfo.tableId, tid, true, false, TableOperation.CLONE);
-    return val;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    Utils.tableNameLock.lock();
-    try {
-      // write tableName & tableId to zookeeper
-
-      Utils.checkTableDoesNotExist(environment.getInstance(), cloneInfo.tableName, cloneInfo.tableId, TableOperation.CLONE);
-
-      TableManager.getInstance().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId, cloneInfo.tableName, cloneInfo.namespaceId, cloneInfo.propertiesToSet,
-          cloneInfo.propertiesToExclude, NodeExistsPolicy.OVERWRITE);
-      Tables.clearCache(environment.getInstance());
-
-      return new CloneMetadata(cloneInfo);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    TableManager.getInstance().removeTable(cloneInfo.tableId);
-    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
-    Utils.unreserveTable(cloneInfo.tableId, tid, true);
-    Tables.clearCache(environment.getInstance());
-  }
-
-}
-
-class ClonePermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private CloneInfo cloneInfo;
-
-  public ClonePermissions(CloneInfo cloneInfo) {
-    this.cloneInfo = cloneInfo;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    // give all table permissions to the creator
-    for (TablePermission permission : TablePermission.values()) {
-      try {
-        AuditedSecurityOperation.getInstance(environment).grantTablePermission(environment.rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission,
-            cloneInfo.namespaceId);
-      } catch (ThriftSecurityException e) {
-        LoggerFactory.getLogger(FinishCloneTable.class).error("{}", e.getMessage(), e);
-        throw e;
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious pemission denied
-    // error
-    try {
-      return new CloneZookeeper(cloneInfo);
-    } catch (NamespaceNotFoundException e) {
-      throw new ThriftTableOperationException(null, cloneInfo.tableName, TableOperation.CLONE, TableOperationExceptionType.NAMESPACE_NOTFOUND,
-          "Namespace for target table not found");
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    AuditedSecurityOperation.getInstance(environment).deleteTable(environment.rpcCreds(), cloneInfo.tableId, cloneInfo.namespaceId);
-  }
-}
 
 public class CloneTable extends MasterRepo {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
new file mode 100644
index 0000000..072f5de
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.NamespaceNotFoundException;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.tables.TableManager;
+
+class CloneZookeeper extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private CloneInfo cloneInfo;
+
+  public CloneZookeeper(CloneInfo cloneInfo) throws NamespaceNotFoundException {
+    this.cloneInfo = cloneInfo;
+    this.cloneInfo.namespaceId = Namespaces.getNamespaceId(HdfsZooInstance.getInstance(), Tables.qualify(this.cloneInfo.tableName).getFirst());
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    long val = 0;
+    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
+      val += Utils.reserveNamespace(cloneInfo.namespaceId, tid, false, true, TableOperation.CLONE);
+    val += Utils.reserveTable(cloneInfo.tableId, tid, true, false, TableOperation.CLONE);
+    return val;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    Utils.tableNameLock.lock();
+    try {
+      // write tableName & tableId to zookeeper
+
+      Utils.checkTableDoesNotExist(environment.getInstance(), cloneInfo.tableName, cloneInfo.tableId, TableOperation.CLONE);
+
+      TableManager.getInstance().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId, cloneInfo.tableName, cloneInfo.namespaceId, cloneInfo.propertiesToSet,
+          cloneInfo.propertiesToExclude, NodeExistsPolicy.OVERWRITE);
+      Tables.clearCache(environment.getInstance());
+
+      return new CloneMetadata(cloneInfo);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    TableManager.getInstance().removeTable(cloneInfo.tableId);
+    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
+      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
+    Utils.unreserveTable(cloneInfo.tableId, tid, true);
+    Tables.clearCache(environment.getInstance());
+  }
+
+}
\ No newline at end of file
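
CloneZookeeper.call() is a check-then-create guarded by Utils.tableNameLock so two concurrent operations cannot claim the same table name. A minimal sketch of that pattern, with an in-memory registry standing in for zookeeper:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

public class NameRegistry {
  private final ReentrantLock nameLock = new ReentrantLock();
  private final Map<String,String> nameToId = new ConcurrentHashMap<String,String>();

  String register(String name, String id) {
    nameLock.lock();
    try {
      // the existence check and the write must happen under the same lock
      if (nameToId.containsKey(name))
        throw new IllegalStateException("table exists: " + name);
      nameToId.put(name, id);
      return id;
    } finally {
      nameLock.unlock(); // always released, even if the check throws
    }
  }
}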

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index 133663d..befaea3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -18,188 +18,29 @@ package org.apache.accumulo.master.tableOps;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
-import java.util.Collections;
-import java.util.Iterator;
 import java.util.List;
-import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.RowIterator;
-import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
 import org.apache.accumulo.core.client.impl.CompactionStrategyConfigUtil;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MapCounter;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
-import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.tableOps.UserCompactionConfig;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.thrift.TException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 
-class CompactionDriver extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private long compactId;
-  private final String tableId;
-  private byte[] startRow;
-  private byte[] endRow;
-
-  public CompactionDriver(long compactId, String tableId, byte[] startRow, byte[] endRow) {
-
-    this.compactId = compactId;
-    this.tableId = tableId;
-    this.startRow = startRow;
-    this.endRow = endRow;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-
-    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
-
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-
-    if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
-      // compaction was canceled
-      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OTHER, "Compaction canceled");
-    }
-
-    MapCounter<TServerInstance> serversToFlush = new MapCounter<TServerInstance>();
-    Connector conn = master.getConnector();
-
-    Scanner scanner;
-
-    if (tableId.equals(MetadataTable.ID)) {
-      scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
-      scanner.setRange(MetadataSchema.TabletsSection.getRange());
-    } else {
-      scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
-      Range range = new KeyExtent(new Text(tableId), null, startRow == null ? null : new Text(startRow)).toMetadataRange();
-      scanner.setRange(range);
-    }
-
-    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
-
-    long t1 = System.currentTimeMillis();
-    RowIterator ri = new RowIterator(scanner);
-
-    int tabletsToWaitFor = 0;
-    int tabletCount = 0;
-
-    while (ri.hasNext()) {
-      Iterator<Entry<Key,Value>> row = ri.next();
-      long tabletCompactID = -1;
-
-      TServerInstance server = null;
-
-      Entry<Key,Value> entry = null;
-      while (row.hasNext()) {
-        entry = row.next();
-        Key key = entry.getKey();
-
-        if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
-          tabletCompactID = Long.parseLong(entry.getValue().toString());
-
-        if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
-          server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
-      }
-
-      if (tabletCompactID < compactId) {
-        tabletsToWaitFor++;
-        if (server != null)
-          serversToFlush.increment(server, 1);
-      }
-
-      tabletCount++;
-
-      Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
-      if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
-        break;
-    }
-
-    long scanTime = System.currentTimeMillis() - t1;
-
-    Instance instance = master.getInstance();
-    Tables.clearCache(instance);
-    if (tabletCount == 0 && !Tables.exists(instance, tableId))
-      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
-
-    if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
-      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OFFLINE, null);
-
-    if (tabletsToWaitFor == 0)
-      return 0;
-
-    for (TServerInstance tsi : serversToFlush.keySet()) {
-      try {
-        final TServerConnection server = master.getConnection(tsi);
-        if (server != null)
-          server.compact(master.getMasterLock(), tableId, startRow, endRow);
-      } catch (TException ex) {
-        LoggerFactory.getLogger(CompactionDriver.class).error(ex.toString());
-      }
-    }
-
-    long sleepTime = 500;
-
-    if (serversToFlush.size() > 0)
-      sleepTime = Collections.max(serversToFlush.values()) * sleepTime; // make wait time depend on the server with the most to
-                                                                        // compact
-
-    sleepTime = Math.max(2 * scanTime, sleepTime);
-
-    sleepTime = Math.min(sleepTime, 30000);
-
-    return sleepTime;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
-    CompactRange.removeIterators(environment, tid, tableId);
-    Utils.getReadLock(tableId, tid).unlock();
-    Utils.getReadLock(namespaceId, tid).unlock();
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-
-  }
-
-}
-
 public class CompactRange extends MasterRepo {
 
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
new file mode 100644
index 0000000..e3d0820
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MapCounter;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+import org.slf4j.LoggerFactory;
+
+class CompactionDriver extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private long compactId;
+  private final String tableId;
+  private byte[] startRow;
+  private byte[] endRow;
+
+  public CompactionDriver(long compactId, String tableId, byte[] startRow, byte[] endRow) {
+
+    this.compactId = compactId;
+    this.tableId = tableId;
+    this.startRow = startRow;
+    this.endRow = endRow;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+
+    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
+
+    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+
+    if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
+      // compaction was canceled
+      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OTHER, "Compaction canceled");
+    }
+
+    MapCounter<TServerInstance> serversToFlush = new MapCounter<TServerInstance>();
+    Connector conn = master.getConnector();
+
+    Scanner scanner;
+
+    if (tableId.equals(MetadataTable.ID)) {
+      scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
+      scanner.setRange(MetadataSchema.TabletsSection.getRange());
+    } else {
+      scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+      Range range = new KeyExtent(new Text(tableId), null, startRow == null ? null : new Text(startRow)).toMetadataRange();
+      scanner.setRange(range);
+    }
+
+    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+
+    long t1 = System.currentTimeMillis();
+    RowIterator ri = new RowIterator(scanner);
+
+    int tabletsToWaitFor = 0;
+    int tabletCount = 0;
+
+    while (ri.hasNext()) {
+      Iterator<Entry<Key,Value>> row = ri.next();
+      long tabletCompactID = -1;
+
+      TServerInstance server = null;
+
+      Entry<Key,Value> entry = null;
+      while (row.hasNext()) {
+        entry = row.next();
+        Key key = entry.getKey();
+
+        if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+          tabletCompactID = Long.parseLong(entry.getValue().toString());
+
+        if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
+          server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
+      }
+
+      if (tabletCompactID < compactId) {
+        tabletsToWaitFor++;
+        if (server != null)
+          serversToFlush.increment(server, 1);
+      }
+
+      tabletCount++;
+
+      Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
+      if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
+        break;
+    }
+
+    long scanTime = System.currentTimeMillis() - t1;
+
+    Instance instance = master.getInstance();
+    Tables.clearCache(instance);
+    if (tabletCount == 0 && !Tables.exists(instance, tableId))
+      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
+
+    if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
+      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OFFLINE, null);
+
+    if (tabletsToWaitFor == 0)
+      return 0;
+
+    for (TServerInstance tsi : serversToFlush.keySet()) {
+      try {
+        final TServerConnection server = master.getConnection(tsi);
+        if (server != null)
+          server.compact(master.getMasterLock(), tableId, startRow, endRow);
+      } catch (TException ex) {
+        LoggerFactory.getLogger(CompactionDriver.class).error(ex.toString());
+      }
+    }
+
+    long sleepTime = 500;
+
+    if (serversToFlush.size() > 0)
+      sleepTime = Collections.max(serversToFlush.values()) * sleepTime; // make wait time depend on the server with the most to
+                                                                        // compact
+
+    sleepTime = Math.max(2 * scanTime, sleepTime);
+
+    sleepTime = Math.min(sleepTime, 30000);
+
+    return sleepTime;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
+    CompactRange.removeIterators(environment, tid, tableId);
+    Utils.getReadLock(tableId, tid).unlock();
+    Utils.getReadLock(namespaceId, tid).unlock();
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+
+  }
+
+}
\ No newline at end of file
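
The wait-time arithmetic at the end of isReady() above is worth restating: scale a 500 ms base by the busiest server's remaining tablet count, never poll faster than twice the metadata scan time, and cap the wait at 30 seconds. A small worked example:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class CompactionBackoff {
  // Mirrors the sleep-time calculation in CompactionDriver.isReady()
  static long sleepTime(List<Long> perServerTabletCounts, long scanTimeMs) {
    long sleepTime = 500;
    if (!perServerTabletCounts.isEmpty())
      sleepTime = Collections.max(perServerTabletCounts) * sleepTime; // busiest server dominates
    sleepTime = Math.max(2 * scanTimeMs, sleepTime); // don't poll faster than 2x the scan
    return Math.min(sleepTime, 30000);               // cap at 30 s
  }

  public static void main(String[] args) {
    // busiest server has 40 tablets left -> 40 * 500 = 20000 ms
    System.out.println(sleepTime(Arrays.asList(3L, 40L, 7L), 1200)); // prints 20000
  }
}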

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java
new file mode 100644
index 0000000..8905c80
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
+
+class CompleteBulkImport extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String error;
+
+  public CompleteBulkImport(String tableId, String source, String bulk, String error) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.error = error;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
+    return new CopyFailed(tableId, source, bulk, error);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java
new file mode 100644
index 0000000..e0cc8ec
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+
+class CopyFailed extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String error;
+
+  public CopyFailed(String tableId, String source, String bulk, String error) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.error = error;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    Set<TServerInstance> finished = new HashSet<TServerInstance>();
+    Set<TServerInstance> running = master.onlineTabletServers();
+    for (TServerInstance server : running) {
+      try {
+        TServerConnection client = master.getConnection(server);
+        if (client != null && !client.isActive(tid))
+          finished.add(server);
+      } catch (TException ex) {
+        log.info("Ignoring error trying to check on tid " + tid + " from server " + server + ": " + ex);
+      }
+    }
+    if (finished.containsAll(running))
+      return 0;
+    return 500;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    // This needs to execute after the arbiter is stopped
+
+    VolumeManager fs = master.getFileSystem();
+
+    if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
+      return new CleanUpBulkImport(tableId, source, bulk, error);
+
+    HashMap<FileRef,String> failures = new HashMap<FileRef,String>();
+    HashMap<FileRef,String> loadedFailures = new HashMap<FileRef,String>();
+
+    try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(error, BulkImport.FAILURES_TXT)), UTF_8))) {
+      String line = null;
+      while ((line = in.readLine()) != null) {
+        Path path = new Path(line);
+        if (!fs.exists(new Path(error, path.getName())))
+          failures.put(new FileRef(line, path), line);
+      }
+    }
+
+    /*
+     * I thought I could move files that have no file references in the table. However, it's possible a clone references a file. Therefore, only move files
+     * that have no loaded markers.
+     */
+
+    // determine which failed files were loaded
+    Connector conn = master.getConnector();
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
+
+    for (Entry<Key,Value> entry : mscanner) {
+      if (Long.parseLong(entry.getValue().toString()) == tid) {
+        FileRef loadedFile = new FileRef(fs, entry.getKey());
+        String absPath = failures.remove(loadedFile);
+        if (absPath != null) {
+          loadedFailures.put(loadedFile, absPath);
+        }
+      }
+    }
+
+    // move failed files that were not loaded
+    for (String failure : failures.values()) {
+      Path orig = new Path(failure);
+      Path dest = new Path(error, orig.getName());
+      fs.rename(orig, dest);
+      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
+    }
+
+    if (loadedFailures.size() > 0) {
+      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ,
+          master.getConfiguration());
+
+      HashSet<String> workIds = new HashSet<String>();
+
+      for (String failure : loadedFailures.values()) {
+        Path orig = new Path(failure);
+        Path dest = new Path(error, orig.getName());
+
+        if (fs.exists(dest))
+          continue;
+
+        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(UTF_8));
+        workIds.add(orig.getName());
+        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
+      }
+
+      bifCopyQueue.waitUntilDone(workIds);
+    }
+
+    fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
+    return new CleanUpBulkImport(tableId, source, bulk, error);
+  }
+
+}
\ No newline at end of file
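
The triage in call() above splits failed files into two groups: files with no loaded markers are renamed straight into the error directory, while files a tablet (or clone) may still reference are copied through the distributed work queue. A toy restatement with plain collections (file names and paths are made up):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class FailedFileTriage {
  public static void main(String[] args) {
    // failed files reported in failures.txt
    Map<String,String> failures = new HashMap<String,String>();
    failures.put("f1.rf", "/bulk/f1.rf");
    failures.put("f2.rf", "/bulk/f2.rf");

    // files the metadata table says were loaded under this tid
    Set<String> loadedUnderTid = new HashSet<String>(Arrays.asList("f2.rf"));

    Map<String,String> loadedFailures = new HashMap<String,String>();
    for (String loaded : loadedUnderTid) {
      String path = failures.remove(loaded);
      if (path != null)
        loadedFailures.put(loaded, path); // must be copied; something may still reference it
    }
    System.out.println("rename directly: " + failures);          // {f1.rf=/bulk/f1.rf}
    System.out.println("copy via work queue: " + loadedFailures); // {f2.rf=/bulk/f2.rf}
  }
}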

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java
new file mode 100644
index 0000000..6221624
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.Path;
+
+class CreateDir extends MasterRepo {
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  CreateDir(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    VolumeManager fs = master.getFileSystem();
+    fs.mkdirs(new Path(tableInfo.dir));
+    return new PopulateMetadata(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+    VolumeManager fs = master.getFileSystem();
+    fs.deleteRecursively(new Path(tableInfo.dir));
+
+  }
+}
\ No newline at end of file
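
A plain-Hadoop sketch of the call()/undo() pair above, showing the forward step and its compensating delete (the path is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateDirSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration()); // local FS without a cluster config
    Path dir = new Path("/tmp/accumulo-example-table/default_tablet");
    boolean created = fs.mkdirs(dir);        // call(): create the tablet directory
    System.out.println("created: " + created);
    boolean removed = fs.delete(dir, true);  // undo(): recursive delete compensates
    System.out.println("removed: " + removed);
  }
}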

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
new file mode 100644
index 0000000..4f0e7f8
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Arrays;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.tablets.UniqueNameAllocator;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CreateImportDir extends MasterRepo {
+  private static final Logger log = LoggerFactory.getLogger(CreateImportDir.class);
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  CreateImportDir(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+
+    Path exportDir = new Path(tableInfo.exportDir);
+    String[] tableDirs = ServerConstants.getTablesDirs();
+
+    log.info("Looking for matching filesystem for " + exportDir + " from options " + Arrays.toString(tableDirs));
+    Path base = master.getFileSystem().matchingFileSystem(exportDir, tableDirs);
+    log.info("Chose base table directory of " + base);
+    Path directory = new Path(base, tableInfo.tableId);
+
+    Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
+
+    tableInfo.importDir = newBulkDir.toString();
+
+    log.info("Using import dir: " + tableInfo.importDir);
+
+    return new MapImportFileNames(tableInfo);
+  }
+}
\ No newline at end of file
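
A sketch of the directory layout this step builds (the base path and allocated name are placeholders for what matchingFileSystem() and UniqueNameAllocator.getNextName() would return):

import org.apache.hadoop.fs.Path;

public class BulkDirNaming {
  public static void main(String[] args) {
    Path base = new Path("hdfs://nn:8020/accumulo/tables");
    String tableId = "4x";
    String allocatedName = "0000007";        // stand-in for UniqueNameAllocator output
    Path directory = new Path(base, tableId);
    // Constants.BULK_PREFIX ("b-") plus the allocated name
    Path newBulkDir = new Path(directory, "b-" + allocatedName);
    System.out.println(newBulkDir); // hdfs://nn:8020/accumulo/tables/4x/b-0000007
  }
}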

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
index 9264031..b01fbcc 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
@@ -16,147 +16,10 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.Serializable;
 import java.util.Map;
-import java.util.Map.Entry;
 
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.security.NamespacePermission;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.util.NamespacePropUtil;
-import org.slf4j.LoggerFactory;
-
-class NamespaceInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  String namespaceName;
-  String namespaceId;
-  String user;
-
-  public Map<String,String> props;
-}
-
-class FinishCreateNamespace extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private NamespaceInfo namespaceInfo;
-
-  public FinishCreateNamespace(NamespaceInfo ti) {
-    this.namespaceInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long id, Master env) throws Exception {
-
-    Utils.unreserveNamespace(namespaceInfo.namespaceId, id, true);
-
-    env.getEventCoordinator().event("Created namespace %s ", namespaceInfo.namespaceName);
-
-    LoggerFactory.getLogger(FinishCreateNamespace.class).debug("Created table " + namespaceInfo.namespaceId + " " + namespaceInfo.namespaceName);
-
-    return null;
-  }
-
-  @Override
-  public String getReturn() {
-    return namespaceInfo.namespaceId;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {}
-
-}
-
-class PopulateZookeeperWithNamespace extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private NamespaceInfo namespaceInfo;
-
-  PopulateZookeeperWithNamespace(NamespaceInfo ti) {
-    this.namespaceInfo = ti;
-  }
-
-  @Override
-  public long isReady(long id, Master environment) throws Exception {
-    return Utils.reserveNamespace(namespaceInfo.namespaceId, id, true, false, TableOperation.CREATE);
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    Utils.tableNameLock.lock();
-    try {
-      Instance instance = master.getInstance();
-
-      Utils.checkNamespaceDoesNotExist(instance, namespaceInfo.namespaceName, namespaceInfo.namespaceId, TableOperation.CREATE);
-
-      TableManager.prepareNewNamespaceState(instance.getInstanceID(), namespaceInfo.namespaceId, namespaceInfo.namespaceName, NodeExistsPolicy.OVERWRITE);
-
-      for (Entry<String,String> entry : namespaceInfo.props.entrySet())
-        NamespacePropUtil.setNamespaceProperty(namespaceInfo.namespaceId, entry.getKey(), entry.getValue());
-
-      Tables.clearCache(instance);
-
-      return new FinishCreateNamespace(namespaceInfo);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-    TableManager.getInstance().removeNamespace(namespaceInfo.namespaceId);
-    Tables.clearCache(master.getInstance());
-    Utils.unreserveNamespace(namespaceInfo.namespaceId, tid, true);
-  }
-
-}
-
-class SetupNamespacePermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private NamespaceInfo namespaceInfo;
-
-  public SetupNamespacePermissions(NamespaceInfo ti) {
-    this.namespaceInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // give all namespace permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
-    for (NamespacePermission permission : NamespacePermission.values()) {
-      try {
-        security.grantNamespacePermission(env.rpcCreds(), namespaceInfo.user, namespaceInfo.namespaceId, permission);
-      } catch (ThriftSecurityException e) {
-        LoggerFactory.getLogger(FinishCreateNamespace.class).error("{}", e.getMessage(), e);
-        throw e;
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    return new PopulateZookeeperWithNamespace(namespaceInfo);
-  }
-}
 
 public class CreateNamespace extends MasterRepo {
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index 9436704..ea2e395 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -16,264 +16,13 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.Serializable;
 import java.util.Map;
-import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.tablets.TabletTime;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.util.TablePropUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-
-class TableInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  String tableName;
-  String tableId;
-  String namespaceId;
-  char timeType;
-  String user;
-
-  public Map<String,String> props;
-
-  public String dir = null;
-}
-
-class FinishCreateTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  public FinishCreateTable(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
-
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-
-    env.getEventCoordinator().event("Created table %s ", tableInfo.tableName);
-
-    LoggerFactory.getLogger(FinishCreateTable.class).debug("Created table " + tableInfo.tableId + " " + tableInfo.tableName);
-
-    return null;
-  }
-
-  @Override
-  public String getReturn() {
-    return tableInfo.tableId;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {}
-
-}
-
-class PopulateMetadata extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  PopulateMetadata(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
-    MetadataTableUtil.addTablet(extent, tableInfo.dir, environment, tableInfo.timeType, environment.getMasterLock());
-
-    return new FinishCreateTable(tableInfo);
-
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
-  }
-
-}
-
-class CreateDir extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  CreateDir(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    VolumeManager fs = master.getFileSystem();
-    fs.mkdirs(new Path(tableInfo.dir));
-    return new PopulateMetadata(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-    VolumeManager fs = master.getFileSystem();
-    fs.deleteRecursively(new Path(tableInfo.dir));
-
-  }
-}
-
-class ChooseDir extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  ChooseDir(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add one here
-    tableInfo.dir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
-        + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
-    return new CreateDir(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-
-  }
-}
-
-class PopulateZookeeper extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  PopulateZookeeper(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.CREATE);
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    // reserve the table name in zookeeper or fail
-
-    Utils.tableNameLock.lock();
-    try {
-      // write tableName & tableId to zookeeper
-      Instance instance = master.getInstance();
-
-      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
-
-      TableManager.getInstance().addTable(tableInfo.tableId, tableInfo.namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
-
-      for (Entry<String,String> entry : tableInfo.props.entrySet())
-        TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue());
-
-      Tables.clearCache(instance);
-      return new ChooseDir(tableInfo);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-    Instance instance = master.getInstance();
-    TableManager.getInstance().removeTable(tableInfo.tableId);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-    Tables.clearCache(instance);
-  }
-
-}
-
-class SetupPermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  public SetupPermissions(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
-    if (!tableInfo.user.equals(env.getCredentials().getPrincipal())) {
-      for (TablePermission permission : TablePermission.values()) {
-        try {
-          security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
-        } catch (ThriftSecurityException e) {
-          LoggerFactory.getLogger(FinishCreateTable.class).error("{}", e.getMessage(), e);
-          throw e;
-        }
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    return new PopulateZookeeper(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
-  }
-
-}
 
 public class CreateTable extends MasterRepo {
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
index 3aa3719..f84671e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
@@ -16,64 +16,9 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-class NamespaceCleanUp extends MasterRepo {
-
-  final private static Logger log = LoggerFactory.getLogger(CleanUp.class);
-
-  private static final long serialVersionUID = 1L;
-
-  private String namespaceId;
-
-  public NamespaceCleanUp(String namespaceId) {
-    this.namespaceId = namespaceId;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long id, Master master) throws Exception {
-
-    // remove from zookeeper
-    try {
-      TableManager.getInstance().removeNamespace(namespaceId);
-    } catch (Exception e) {
-      log.error("Failed to find namespace in zookeeper", e);
-    }
-    Tables.clearCache(master.getInstance());
-
-    // remove any permissions associated with this namespace
-    try {
-      AuditedSecurityOperation.getInstance(master).deleteNamespace(master.rpcCreds(), namespaceId);
-    } catch (ThriftSecurityException e) {
-      log.error("{}", e.getMessage(), e);
-    }
-
-    Utils.unreserveNamespace(namespaceId, id, true);
-
-    LoggerFactory.getLogger(CleanUp.class).debug("Deleted namespace " + namespaceId);
-
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    // nothing to do
-  }
-
-}
 
 public class DeleteNamespace extends MasterRepo {
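
A note on the NamespaceCleanUp repo removed above (relocated to its own file in this commit, like the other inner classes): it is deliberately best-effort. Each action is wrapped in its own try/catch and failures are logged rather than rethrown, so the remaining cleanup still runs and the operation can finish. A minimal standalone sketch of that idiom, with invented step names (illustrative only, not part of the patch):

    import java.util.Arrays;
    import java.util.List;

    public class BestEffortCleanup {
      public static void main(String[] args) {
        List<Runnable> steps = Arrays.asList(
            () -> System.out.println("remove znode"),
            () -> { throw new RuntimeException("permission store unavailable"); },
            () -> System.out.println("release reservation"));
        for (Runnable step : steps) {
          try {
            step.run(); // each step is attempted regardless of earlier failures
          } catch (RuntimeException e) {
            System.err.println("cleanup step failed, continuing: " + e.getMessage());
          }
        }
      }
    }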
 


[5/9] accumulo git commit: ACCUMULO-3759 Fix Java 8 compiler warnings

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java
new file mode 100644
index 0000000..ef2becd
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceInfo.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+import java.util.Map;
+
+class NamespaceInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  String namespaceName;
+  String namespaceId;
+  String user;
+
+  public Map<String,String> props;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
new file mode 100644
index 0000000..da13ecc
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadata.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.io.Text;
+
+class PopulateMetadata extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  PopulateMetadata(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
+    MetadataTableUtil.addTablet(extent, tableInfo.dir, environment, tableInfo.timeType, environment.getMasterLock());
+
+    return new FinishCreateTable(tableInfo);
+
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
+  }
+
+}
\ No newline at end of file
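
PopulateMetadata is one link in the create-table chain visible in these diffs (SetupPermissions -> PopulateZookeeper -> ChooseDir -> CreateDir -> PopulateMetadata -> FinishCreateTable): each repo's call() does one redoable unit of work and returns the next repo, while undo() reverses it when a later step fails. A self-contained sketch of that driver pattern, with a simplified Step interface standing in for org.apache.accumulo.fate.Repo (the real FATE runner also serializes each step so the chain survives a master restart):

    import java.util.ArrayDeque;
    import java.util.Deque;

    interface Step {
      Step call() throws Exception; // do one unit of work, return the next step or null when done
      void undo() throws Exception; // reverse this step's work
    }

    public class StepRunner {
      public static void run(Step first) throws Exception {
        Deque<Step> completed = new ArrayDeque<>();
        Step current = first;
        try {
          while (current != null) {
            Step next = current.call();
            completed.push(current);
            current = next;
          }
        } catch (Exception e) {
          while (!completed.isEmpty())
            completed.pop().undo(); // unwind in reverse order (a robust version would guard each undo)
          throw e;
        }
      }
    }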

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
new file mode 100644
index 0000000..72832ba
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateMetadataTable.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.util.FastFormat;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+
+import com.google.common.base.Optional;
+
+class PopulateMetadataTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  PopulateMetadataTable(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  static Map<String,String> readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo) throws Exception {
+    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(tableInfo.importDir, "mappings.txt")), UTF_8));
+
+    try {
+      Map<String,String> map = new HashMap<String,String>();
+
+      String line = null;
+      while ((line = in.readLine()) != null) {
+        String sa[] = line.split(":", 2);
+        map.put(sa[0], sa[1]);
+      }
+
+      return map;
+    } finally {
+      in.close();
+    }
+
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
+
+    BatchWriter mbw = null;
+    ZipInputStream zis = null;
+
+    try {
+      VolumeManager fs = master.getFileSystem();
+
+      mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+
+      zis = new ZipInputStream(fs.open(path));
+
+      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
+
+      log.info("importDir is " + tableInfo.importDir);
+
+      // This is a directory already prefixed with proper volume information e.g. hdfs://localhost:8020/path/to/accumulo/tables/...
+      final String bulkDir = tableInfo.importDir;
+
+      final String[] tableDirs = ServerConstants.getTablesDirs();
+
+      ZipEntry zipEntry;
+      while ((zipEntry = zis.getNextEntry()) != null) {
+        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
+          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
+
+          Key key = new Key();
+          Value val = new Value();
+
+          Mutation m = null;
+          Text currentRow = null;
+          int dirCount = 0;
+
+          while (true) {
+            key.readFields(in);
+            val.readFields(in);
+
+            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
+            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
+
+            Text cq;
+
+            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+              String oldName = new Path(key.getColumnQualifier().toString()).getName();
+              String newName = fileNameMappings.get(oldName);
+
+              if (newName == null) {
+                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+                    "File " + oldName + " does not exist in import dir");
+              }
+
+              cq = new Text(bulkDir + "/" + newName);
+            } else {
+              cq = key.getColumnQualifier();
+            }
+
+            if (m == null) {
+              // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
+              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
+
+              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
+              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
+
+              m = new Mutation(metadataRow);
+              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
+              currentRow = metadataRow;
+            }
+
+            if (!currentRow.equals(metadataRow)) {
+              mbw.addMutation(m);
+
+              // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
+              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
+
+              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
+              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
+
+              m = new Mutation(metadataRow);
+              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
+            }
+
+            m.put(key.getColumnFamily(), cq, val);
+
+            if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
+              mbw.addMutation(m);
+              break; // it's the last column in the last row
+            }
+          }
+
+          break;
+        }
+      }
+
+      return new MoveExportedFiles(tableInfo);
+    } catch (IOException ioe) {
+      log.warn("{}", ioe.getMessage(), ioe);
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error reading " + path + " " + ioe.getMessage());
+    } finally {
+      if (zis != null) {
+        try {
+          zis.close();
+        } catch (IOException ioe) {
+          log.warn("Failed to close zip file ", ioe);
+        }
+      }
+
+      if (mbw != null) {
+        mbw.close();
+      }
+    }
+  }
+
+  /**
+   * Given options for tables (across multiple volumes), construct an absolute path using the unique name within the chosen volume
+   *
+   * @return An absolute, unique path for the imported table
+   */
+  protected String getClonedTabletDir(Master master, String[] tableDirs, String tabletDir) {
+    // We can try to spread out the tablet dirs across all volumes
+    String tableDir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), tableDirs);
+
+    // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
+    return tableDir + "/" + tableInfo.tableId + "/" + tabletDir;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
+  }
+}
\ No newline at end of file
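
readMappingFile above expects mappings.txt to hold one old-name:new-name pair per line, split on the first colon only (hence the limit argument of 2). A self-contained sketch of the same parsing; the sample file names are invented:

    import java.util.HashMap;
    import java.util.Map;

    public class MappingParse {
      public static void main(String[] args) {
        String sample = "I0000070.rf:import-0001.rf\nI0000071.rf:import-0002.rf";
        Map<String,String> mappings = new HashMap<>();
        for (String line : sample.split("\n")) {
          String[] sa = line.split(":", 2); // limit 2 keeps any later ':' inside the new name
          mappings.put(sa[0], sa[1]);
        }
        System.out.println(mappings); // prints the two old->new entries
      }
    }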

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
new file mode 100644
index 0000000..8ec8834
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.TablePropUtil;
+
+class PopulateZookeeper extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  PopulateZookeeper(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.CREATE);
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    // reserve the table name in zookeeper or fail
+
+    Utils.tableNameLock.lock();
+    try {
+      // write tableName & tableId to zookeeper
+      Instance instance = master.getInstance();
+
+      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
+
+      TableManager.getInstance().addTable(tableInfo.tableId, tableInfo.namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
+
+      for (Entry<String,String> entry : tableInfo.props.entrySet())
+        TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue());
+
+      Tables.clearCache(instance);
+      return new ChooseDir(tableInfo);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+    Instance instance = master.getInstance();
+    TableManager.getInstance().removeTable(tableInfo.tableId);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    Tables.clearCache(instance);
+  }
+
+}
\ No newline at end of file
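
PopulateZookeeper takes Utils.tableNameLock around checkTableDoesNotExist and addTable so that two concurrent creates cannot both pass the existence check. The shape of that check-then-act idiom in isolation (names invented; the sketch uses an in-memory map where the real code writes to ZooKeeper):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantLock;

    class NameRegistry {
      private final ReentrantLock nameLock = new ReentrantLock();
      private final Map<String,String> nameToId = new HashMap<>();

      String register(String name, String id) {
        nameLock.lock();
        try {
          if (nameToId.containsKey(name)) // check...
            throw new IllegalStateException("table " + name + " already exists");
          nameToId.put(name, id);         // ...then act, atomically with the check
          return id;
        } finally {
          nameLock.unlock();
        }
      }
    }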

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
new file mode 100644
index 0000000..bf101ae
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.NamespacePropUtil;
+
+class PopulateZookeeperWithNamespace extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private NamespaceInfo namespaceInfo;
+
+  PopulateZookeeperWithNamespace(NamespaceInfo ti) {
+    this.namespaceInfo = ti;
+  }
+
+  @Override
+  public long isReady(long id, Master environment) throws Exception {
+    return Utils.reserveNamespace(namespaceInfo.namespaceId, id, true, false, TableOperation.CREATE);
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    Utils.tableNameLock.lock();
+    try {
+      Instance instance = master.getInstance();
+
+      Utils.checkNamespaceDoesNotExist(instance, namespaceInfo.namespaceName, namespaceInfo.namespaceId, TableOperation.CREATE);
+
+      TableManager.prepareNewNamespaceState(instance.getInstanceID(), namespaceInfo.namespaceId, namespaceInfo.namespaceName, NodeExistsPolicy.OVERWRITE);
+
+      for (Entry<String,String> entry : namespaceInfo.props.entrySet())
+        NamespacePropUtil.setNamespaceProperty(namespaceInfo.namespaceId, entry.getKey(), entry.getValue());
+
+      Tables.clearCache(instance);
+
+      return new FinishCreateNamespace(namespaceInfo);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+    TableManager.getInstance().removeNamespace(namespaceInfo.namespaceId);
+    Tables.clearCache(master.getInstance());
+    Utils.unreserveNamespace(namespaceInfo.namespaceId, tid, true);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
new file mode 100644
index 0000000..ace3935
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupNamespacePermissions.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.security.NamespacePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.security.SecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class SetupNamespacePermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private NamespaceInfo namespaceInfo;
+
+  public SetupNamespacePermissions(NamespaceInfo ti) {
+    this.namespaceInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // give all namespace permissions to the creator
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    for (NamespacePermission permission : NamespacePermission.values()) {
+      try {
+        security.grantNamespacePermission(env.rpcCreds(), namespaceInfo.user, namespaceInfo.namespaceId, permission);
+      } catch (ThriftSecurityException e) {
+        LoggerFactory.getLogger(FinishCreateNamespace.class).error("{}", e.getMessage(), e);
+        throw e;
+      }
+    }
+
+    // setup permissions in zookeeper before table info in zookeeper
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    return new PopulateZookeeperWithNamespace(namespaceInfo);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
new file mode 100644
index 0000000..fd3b7da
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/SetupPermissions.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.security.SecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class SetupPermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  public SetupPermissions(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // give all table permissions to the creator
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    if (!tableInfo.user.equals(env.getCredentials().getPrincipal())) {
+      for (TablePermission permission : TablePermission.values()) {
+        try {
+          security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
+        } catch (ThriftSecurityException e) {
+          LoggerFactory.getLogger(FinishCreateTable.class).error("{}", e.getMessage(), e);
+          throw e;
+        }
+      }
+    }
+
+    // setup permissions in zookeeper before table info in zookeeper
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    return new PopulateZookeeper(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
new file mode 100644
index 0000000..e2057d1
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableInfo.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+import java.util.Map;
+
+class TableInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  String tableName;
+  String tableId;
+  String namespaceId;
+  char timeType;
+  String user;
+
+  public Map<String,String> props;
+
+  public String dir = null;
+}
\ No newline at end of file
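
TableInfo, like NamespaceInfo earlier, is a bare Serializable holder because FATE persists each repo together with the state it carries, so a create-table operation can resume after a master restart; the fixed serialVersionUID keeps previously persisted state readable as these classes move between files. A round-trip sketch of what that persistence relies on:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;

    public class RoundTrip {
      static class Info implements Serializable {
        private static final long serialVersionUID = 1L;
        String tableName = "t1";
      }

      public static void main(String[] args) throws Exception {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
          out.writeObject(new Info()); // roughly what FATE does when it saves a repo
        }
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
          Info copy = (Info) in.readObject();
          System.out.println(copy.tableName); // prints t1
        }
      }
    }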

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
index a9a923b..1d8b116 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
@@ -30,51 +30,6 @@ import org.apache.accumulo.server.master.state.MergeInfo.Operation;
 import org.apache.accumulo.server.master.state.MergeState;
 import org.apache.hadoop.io.Text;
 
-/**
- * Merge makes things hard.
- *
- * Typically, a client will read the list of tablets, and begin an operation on that tablet at the location listed in the metadata table. When a tablet splits,
- * the information read from the metadata table doesn't match reality, so the operation fails, and must be retried. But the operation will take place either on
- * the parent, or at a later time on the children. It won't take place on just half of the tablet.
- *
- * However, when a merge occurs, the operation may have succeeded on one section of the merged area and not on the others. There is no
- * way to retry the request at a later time on an unmodified tablet.
- *
- * The code below uses a read-write lock to prevent some operations while a merge is taking place. Normal operations, like bulk imports, will grab the read lock
- * and prevent merges (writes) while they run. Merge operations will lock out some operations while they run.
- */
-class TableRangeOpWait extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private String tableId;
-
-  public TableRangeOpWait(String tableId) {
-    this.tableId = tableId;
-  }
-
-  @Override
-  public long isReady(long tid, Master env) throws Exception {
-    Text tableIdText = new Text(tableId);
-    if (!env.getMergeInfo(tableIdText).getState().equals(MergeState.NONE)) {
-      return 50;
-    }
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    String namespaceId = Tables.getNamespaceId(master.getInstance(), tableId);
-    Text tableIdText = new Text(tableId);
-    MergeInfo mergeInfo = master.getMergeInfo(tableIdText);
-    log.info("removing merge information " + mergeInfo);
-    master.clearMergeState(tableIdText);
-    Utils.unreserveNamespace(namespaceId, tid, false);
-    Utils.unreserveTable(tableId, tid, true);
-    return null;
-  }
-
-}
-
 public class TableRangeOp extends MasterRepo {
 
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
new file mode 100644
index 0000000..bdab469
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.master.state.MergeInfo;
+import org.apache.accumulo.server.master.state.MergeState;
+import org.apache.hadoop.io.Text;
+
+/**
+ * Merge makes things hard.
+ *
+ * Typically, a client will read the list of tablets, and begin an operation on that tablet at the location listed in the metadata table. When a tablet splits,
+ * the information read from the metadata table doesn't match reality, so the operation fails, and must be retried. But the operation will take place either on
+ * the parent, or at a later time on the children. It won't take place on just half of the tablet.
+ *
+ * However, when a merge occurs, the operation may have succeeded on one section of the merged area and not on the others. There is no
+ * way to retry the request at a later time on an unmodified tablet.
+ *
+ * The code below uses a read-write lock to prevent some operations while a merge is taking place. Normal operations, like bulk imports, will grab the read lock
+ * and prevent merges (writes) while they run. Merge operations will lock out some operations while they run.
+ */
+class TableRangeOpWait extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private String tableId;
+
+  public TableRangeOpWait(String tableId) {
+    this.tableId = tableId;
+  }
+
+  @Override
+  public long isReady(long tid, Master env) throws Exception {
+    Text tableIdText = new Text(tableId);
+    if (!env.getMergeInfo(tableIdText).getState().equals(MergeState.NONE)) {
+      return 50;
+    }
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    String namespaceId = Tables.getNamespaceId(master.getInstance(), tableId);
+    Text tableIdText = new Text(tableId);
+    MergeInfo mergeInfo = master.getMergeInfo(tableIdText);
+    log.info("removing merge information " + mergeInfo);
+    master.clearMergeState(tableIdText);
+    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(tableId, tid, true);
+    return null;
+  }
+
+}
\ No newline at end of file
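
The Javadoc above describes the scheme in prose; in java.util.concurrent terms it is a read-write lock where routine operations such as bulk import share the read side and a merge takes the write side, excluding them all. An illustrative in-process sketch (the real reservations come from the Utils.reserveTable/unreserveTable calls seen in these diffs):

    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class RangeOpLocks {
      private final ReadWriteLock lock = new ReentrantReadWriteLock();

      void bulkImport(Runnable work) {
        lock.readLock().lock(); // many importers may hold the read side at once
        try {
          work.run();
        } finally {
          lock.readLock().unlock();
        }
      }

      void merge(Runnable work) {
        lock.writeLock().lock(); // excludes every reader while tablets merge
        try {
          work.run();
        } finally {
          lock.writeLock().unlock();
        }
      }
    }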

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
new file mode 100644
index 0000000..ca31d48
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedOutputStream;
+import java.io.BufferedWriter;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipOutputStream;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.conf.TableConfiguration;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+
+class WriteExportFiles extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private final ExportInfo tableInfo;
+
+  WriteExportFiles(ExportInfo tableInfo) {
+    this.tableInfo = tableInfo;
+  }
+
+  private void checkOffline(Connector conn) throws Exception {
+    if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
+      Tables.clearCache(conn.getInstance());
+      if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
+        throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
+            "Table is not offline");
+      }
+    }
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+
+    long reserved = Utils.reserveNamespace(tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT)
+        + Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
+    if (reserved > 0)
+      return reserved;
+
+    Connector conn = master.getConnector();
+
+    checkOffline(conn);
+
+    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
+
+    // scan for locations
+    metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
+
+    if (metaScanner.iterator().hasNext()) {
+      return 500;
+    }
+
+    // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
+    // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
+    metaScanner.clearColumns();
+    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
+
+    if (metaScanner.iterator().hasNext()) {
+      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
+          "Write ahead logs found for table");
+    }
+
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    try {
+      exportTable(master.getFileSystem(), master, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
+    } catch (IOException ioe) {
+      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
+          "Failed to create export files " + ioe.getMessage());
+    }
+    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
+    Utils.unreserveTable(tableInfo.tableID, tid, false);
+    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
+    Utils.unreserveTable(tableInfo.tableID, tid, false);
+  }
+
+  public static void exportTable(VolumeManager fs, AccumuloServerContext context, String tableName, String tableID, String exportDir) throws Exception {
+
+    fs.mkdirs(new Path(exportDir));
+    Path exportMetaFilePath = fs.getVolumeByPath(new Path(exportDir)).getFileSystem().makeQualified(new Path(exportDir, Constants.EXPORT_FILE));
+
+    FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false);
+    ZipOutputStream zipOut = new ZipOutputStream(fileOut);
+    BufferedOutputStream bufOut = new BufferedOutputStream(zipOut);
+    DataOutputStream dataOut = new DataOutputStream(bufOut);
+
+    try {
+
+      zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
+      OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
+      osw.append(ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n");
+      osw.append("srcInstanceName:" + context.getInstance().getInstanceName() + "\n");
+      osw.append("srcInstanceID:" + context.getInstance().getInstanceID() + "\n");
+      osw.append("srcZookeepers:" + context.getInstance().getZooKeepers() + "\n");
+      osw.append("srcTableName:" + tableName + "\n");
+      osw.append("srcTableID:" + tableID + "\n");
+      osw.append(ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n");
+      osw.append("srcCodeVersion:" + Constants.VERSION + "\n");
+
+      osw.flush();
+      dataOut.flush();
+
+      exportConfig(context, tableID, zipOut, dataOut);
+      dataOut.flush();
+
+      Map<String,String> uniqueFiles = exportMetadata(fs, context, tableID, zipOut, dataOut);
+
+      dataOut.close();
+      dataOut = null;
+
+      createDistcpFile(fs, exportDir, exportMetaFilePath, uniqueFiles);
+
+    } finally {
+      if (dataOut != null)
+        dataOut.close();
+    }
+  }
+
+  private static void createDistcpFile(VolumeManager fs, String exportDir, Path exportMetaFilePath, Map<String,String> uniqueFiles) throws IOException {
+    BufferedWriter distcpOut = new BufferedWriter(new OutputStreamWriter(fs.create(new Path(exportDir, "distcp.txt"), false), UTF_8));
+
+    try {
+      for (String file : uniqueFiles.values()) {
+        distcpOut.append(file);
+        distcpOut.newLine();
+      }
+
+      distcpOut.append(exportMetaFilePath.toString());
+      distcpOut.newLine();
+
+      distcpOut.close();
+      distcpOut = null;
+
+    } finally {
+      if (distcpOut != null)
+        distcpOut.close();
+    }
+  }
+
+  private static Map<String,String> exportMetadata(VolumeManager fs, AccumuloServerContext context, String tableID, ZipOutputStream zipOut,
+      DataOutputStream dataOut) throws IOException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
+    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
+
+    Map<String,String> uniqueFiles = new HashMap<String,String>();
+
+    Scanner metaScanner = context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
+    metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
+
+    for (Entry<Key,Value> entry : metaScanner) {
+      entry.getKey().write(dataOut);
+      entry.getValue().write(dataOut);
+
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+        String path = fs.getFullPath(entry.getKey()).toString();
+        String tokens[] = path.split("/");
+        if (tokens.length < 1) {
+          throw new RuntimeException("Illegal path " + path);
+        }
+
+        String filename = tokens[tokens.length - 1];
+
+        String existingPath = uniqueFiles.get(filename);
+        if (existingPath == null) {
+          uniqueFiles.put(filename, path);
+        } else if (!existingPath.equals(path)) {
+          // make sure file names are unique, should only apply for tables with file names generated by Accumulo 1.3 and earlier
+          throw new IOException("Cannot export table with nonunique file names " + filename + ". Major compact table.");
+        }
+
+      }
+    }
+    return uniqueFiles;
+  }
+
+  private static void exportConfig(AccumuloServerContext context, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
+      AccumuloSecurityException, TableNotFoundException, IOException {
+    Connector conn = context.getConnector();
+
+    DefaultConfiguration defaultConfig = AccumuloConfiguration.getDefaultConfiguration();
+    Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
+    Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
+
+    TableConfiguration tableConfig = context.getServerConfigurationFactory().getTableConfiguration(tableID);
+
+    OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
+
+    // only put props that are different than defaults and higher level configurations
+    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
+    for (Entry<String,String> prop : tableConfig) {
+      if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
+        Property key = Property.getPropertyByKey(prop.getKey());
+
+        if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
+          if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
+            osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
+          }
+        }
+      }
+    }
+
+    osw.flush();
+  }
+}
\ No newline at end of file
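
exportTable writes a single zip whose first entry is a plain-text info file of colon-separated key:value lines (export version, source instance, table name and ID), followed by the raw metadata entries and the table configuration, with a sibling distcp.txt listing every file to copy. A reduced sketch of that container layout using java.util.zip (entry names shortened; the real names come from Constants):

    import java.io.FileOutputStream;
    import java.io.OutputStreamWriter;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipOutputStream;

    public class ExportSketch {
      public static void main(String[] args) throws Exception {
        try (ZipOutputStream zip = new ZipOutputStream(new FileOutputStream("exportMetadata.zip"))) {
          zip.putNextEntry(new ZipEntry("exportinfo"));
          OutputStreamWriter w = new OutputStreamWriter(zip, StandardCharsets.UTF_8);
          w.append("srcTableName:t1\n").append("srcTableID:42\n");
          w.flush(); // flush rather than close: closing the writer would close the zip
          zip.closeEntry();
          zip.putNextEntry(new ZipEntry("metadata.bin"));
          // the real code streams Key/Value pairs here with their write(DataOutput) methods
          zip.closeEntry();
        }
      }
    }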

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
index 80feb47..2d3a0a1 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
@@ -17,11 +17,8 @@
 package org.apache.accumulo.tserver;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -50,7 +47,6 @@ import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SkippingIterator;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.SortedMapIterator;
 import org.apache.accumulo.core.iterators.WrappingIterator;
@@ -72,121 +68,6 @@ import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-class MemKeyComparator implements Comparator<Key>, Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  @Override
-  public int compare(Key k1, Key k2) {
-    int cmp = k1.compareTo(k2);
-
-    if (cmp == 0) {
-      if (k1 instanceof MemKey)
-        if (k2 instanceof MemKey)
-          cmp = ((MemKey) k2).kvCount - ((MemKey) k1).kvCount;
-        else
-          cmp = 1;
-      else if (k2 instanceof MemKey)
-        cmp = -1;
-    }
-
-    return cmp;
-  }
-}
-
-class PartialMutationSkippingIterator extends SkippingIterator implements InterruptibleIterator {
-
-  private int kvCount;
-
-  public PartialMutationSkippingIterator(SortedKeyValueIterator<Key,Value> source, int maxKVCount) {
-    setSource(source);
-    this.kvCount = maxKVCount;
-  }
-
-  @Override
-  protected void consume() throws IOException {
-    while (getSource().hasTop() && ((MemKey) getSource().getTopKey()).kvCount > kvCount)
-      getSource().next();
-  }
-
-  @Override
-  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
-    return new PartialMutationSkippingIterator(getSource().deepCopy(env), kvCount);
-  }
-
-  @Override
-  public void setInterruptFlag(AtomicBoolean flag) {
-    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
-  }
-
-}
-
-class MemKeyConversionIterator extends WrappingIterator implements InterruptibleIterator {
-  private MemKey currKey = null;
-  private Value currVal = null;
-
-  public MemKeyConversionIterator(SortedKeyValueIterator<Key,Value> source) {
-    super();
-    setSource(source);
-  }
-
-  @Override
-  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
-    return new MemKeyConversionIterator(getSource().deepCopy(env));
-  }
-
-  @Override
-  public Key getTopKey() {
-    return currKey;
-  }
-
-  @Override
-  public Value getTopValue() {
-    return currVal;
-  }
-
-  private void getTopKeyVal() {
-    Key k = super.getTopKey();
-    Value v = super.getTopValue();
-    if (k instanceof MemKey || k == null) {
-      currKey = (MemKey) k;
-      currVal = v;
-      return;
-    }
-    currVal = new Value(v);
-    int mc = MemValue.splitKVCount(currVal);
-    currKey = new MemKey(k, mc);
-
-  }
-
-  @Override
-  public void next() throws IOException {
-    super.next();
-    if (hasTop())
-      getTopKeyVal();
-  }
-
-  @Override
-  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
-    super.seek(range, columnFamilies, inclusive);
-
-    if (hasTop())
-      getTopKeyVal();
-
-    Key k = range.getStartKey();
-    if (k instanceof MemKey && hasTop()) {
-      while (hasTop() && currKey.compareTo(k) < 0)
-        next();
-    }
-  }
-
-  @Override
-  public void setInterruptFlag(AtomicBoolean flag) {
-    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
-  }
-
-}
-
 public class InMemoryMap {
   private SimpleMap map = null;
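
PartialMutationSkippingIterator, moved out of this file above, consumes any entry whose kvCount exceeds a watermark, so a scan never sees entries past that count (i.e., from mutations not yet fully applied, as the class name suggests). The skip-while-consuming idiom, reduced to a standalone sketch:

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.NoSuchElementException;

    public class WatermarkSkipDemo {
      // yields only entries whose sequence number is at or below the watermark
      static class SkippingIterator implements Iterator<Integer> {
        private final Iterator<int[]> source; // {value, seq} pairs
        private final int watermark;
        private int[] next;

        SkippingIterator(Iterator<int[]> source, int watermark) {
          this.source = source;
          this.watermark = watermark;
          consume();
        }

        private void consume() { // mirror of the iterator's consume(): skip too-new entries
          next = null;
          while (source.hasNext()) {
            int[] candidate = source.next();
            if (candidate[1] <= watermark) {
              next = candidate;
              return;
            }
          }
        }

        @Override
        public boolean hasNext() {
          return next != null;
        }

        @Override
        public Integer next() {
          if (next == null)
            throw new NoSuchElementException();
          int value = next[0];
          consume();
          return value;
        }
      }

      public static void main(String[] args) {
        Iterator<int[]> source = Arrays.asList(new int[] {1, 5}, new int[] {2, 9}, new int[] {3, 7}).iterator();
        SkippingIterator it = new SkippingIterator(source, 7);
        while (it.hasNext())
          System.out.println(it.next()); // prints 1 then 3; seq 9 is above the watermark
      }
    }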
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java
new file mode 100644
index 0000000..6c8b0f3
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyComparator.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import org.apache.accumulo.core.data.Key;
+
+class MemKeyComparator implements Comparator<Key>, Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public int compare(Key k1, Key k2) {
+    int cmp = k1.compareTo(k2);
+
+    if (cmp == 0) {
+      if (k1 instanceof MemKey)
+        if (k2 instanceof MemKey)
+          cmp = ((MemKey) k2).kvCount - ((MemKey) k1).kvCount;
+        else
+          cmp = 1;
+      else if (k2 instanceof MemKey)
+        cmp = -1;
+    }
+
+    return cmp;
+  }
+}
\ No newline at end of file
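
The comparator above is what gives the in-memory map its multi-version ordering: when two keys are otherwise equal, the entry with the higher kvCount (the more recently applied mutation) sorts first, and a plain Key sorts before any MemKey with the same coordinates. A minimal sketch of that ordering, assuming only what the diff shows (MemKey extends Key, is built as new MemKey(key, kvCount), and copies the wrapped Key's fields); the demo class is hypothetical and would have to live in this package because both types are package-private:

    package org.apache.accumulo.tserver;

    import org.apache.accumulo.core.data.Key;
    import org.apache.hadoop.io.Text;

    // Hypothetical demo, not part of the commit.
    public class MemKeyOrderingDemo {
      public static void main(String[] args) {
        Key base = new Key(new Text("row"), new Text("cf"), new Text("cq"), 42L);
        MemKey older = new MemKey(base, 10);
        MemKey newer = new MemKey(base, 11);

        MemKeyComparator cmp = new MemKeyComparator();
        // Otherwise-equal keys tie-break on kvCount, descending: the more
        // recently applied mutation sorts first.
        System.out.println(cmp.compare(newer, older) < 0); // true
        // A plain Key sorts before a MemKey with the same coordinates.
        System.out.println(cmp.compare(base, newer) < 0);  // true
      }
    }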

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java
new file mode 100644
index 0000000..891a0ba
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/MemKeyConversionIterator.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.WrappingIterator;
+import org.apache.accumulo.core.iterators.system.InterruptibleIterator;
+
+class MemKeyConversionIterator extends WrappingIterator implements InterruptibleIterator {
+  private MemKey currKey = null;
+  private Value currVal = null;
+
+  public MemKeyConversionIterator(SortedKeyValueIterator<Key,Value> source) {
+    super();
+    setSource(source);
+  }
+
+  @Override
+  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+    return new MemKeyConversionIterator(getSource().deepCopy(env));
+  }
+
+  @Override
+  public Key getTopKey() {
+    return currKey;
+  }
+
+  @Override
+  public Value getTopValue() {
+    return currVal;
+  }
+
+  private void getTopKeyVal() {
+    Key k = super.getTopKey();
+    Value v = super.getTopValue();
+    if (k instanceof MemKey || k == null) {
+      currKey = (MemKey) k;
+      currVal = v;
+      return;
+    }
+    currVal = new Value(v);
+    int mc = MemValue.splitKVCount(currVal);
+    currKey = new MemKey(k, mc);
+
+  }
+
+  @Override
+  public void next() throws IOException {
+    super.next();
+    if (hasTop())
+      getTopKeyVal();
+  }
+
+  @Override
+  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+    super.seek(range, columnFamilies, inclusive);
+
+    if (hasTop())
+      getTopKeyVal();
+
+    Key k = range.getStartKey();
+    if (k instanceof MemKey && hasTop()) {
+      while (hasTop() && currKey.compareTo(k) < 0)
+        next();
+    }
+  }
+
+  @Override
+  public void setInterruptFlag(AtomicBoolean flag) {
+    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
+  }
+
+}
\ No newline at end of file
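
MemKeyConversionIterator rebuilds MemKeys for data that has been minor-compacted to files: each Value read back is assumed to carry the kvCount of the mutation that wrote it, which MemValue.splitKVCount strips off and returns so it can be re-attached to the key. Note the extra loop in seek(): file data knows nothing about kvCount ordering, so when the range starts at a MemKey the iterator steps forward until the reconstructed key reaches the requested start. MemValue's actual encoding is not part of this diff; the following is a purely hypothetical sketch of that kind of append/split round trip:

    import java.nio.ByteBuffer;

    // Hypothetical codec sketch -- NOT MemValue's real format. It only
    // illustrates the pattern: a kvCount rides along with the value bytes
    // and is split back off when the file is read.
    public class KVCountCodecSketch {
      static byte[] append(byte[] value, int kvCount) {
        return ByteBuffer.allocate(value.length + 4).put(value).putInt(kvCount).array();
      }

      static int split(byte[] encoded) {
        // The trailing 4 bytes hold the count; a real codec would also
        // shrink the value back to its original length.
        return ByteBuffer.wrap(encoded, encoded.length - 4, 4).getInt();
      }

      public static void main(String[] args) {
        byte[] enc = append("data".getBytes(), 42);
        System.out.println(split(enc)); // 42
      }
    }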

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java
new file mode 100644
index 0000000..8e2f113
--- /dev/null
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/PartialMutationSkippingIterator.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.tserver;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.SkippingIterator;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.system.InterruptibleIterator;
+
+class PartialMutationSkippingIterator extends SkippingIterator implements InterruptibleIterator {
+
+  private int kvCount;
+
+  public PartialMutationSkippingIterator(SortedKeyValueIterator<Key,Value> source, int maxKVCount) {
+    setSource(source);
+    this.kvCount = maxKVCount;
+  }
+
+  @Override
+  protected void consume() throws IOException {
+    while (getSource().hasTop() && ((MemKey) getSource().getTopKey()).kvCount > kvCount)
+      getSource().next();
+  }
+
+  @Override
+  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+    return new PartialMutationSkippingIterator(getSource().deepCopy(env), kvCount);
+  }
+
+  @Override
+  public void setInterruptFlag(AtomicBoolean flag) {
+    ((InterruptibleIterator) getSource()).setInterruptFlag(flag);
+  }
+
+}
\ No newline at end of file
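
This iterator is what keeps readers from observing a half-applied batch: every entry carries the kvCount of the mutation that wrote it, and anything above the snapshot's maximum is skipped by consume(). A sketch of that behavior, assuming the package-private classes above and using the core SortedMapIterator as a stand-in source (the demo class itself is hypothetical):

    package org.apache.accumulo.tserver;

    import java.io.IOException;
    import java.util.Collections;
    import java.util.TreeMap;

    import org.apache.accumulo.core.data.ByteSequence;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.SortedMapIterator;
    import org.apache.hadoop.io.Text;

    // Hypothetical demo, not part of the commit.
    public class PartialMutationSkipDemo {
      public static void main(String[] args) throws IOException {
        TreeMap<Key,Value> data = new TreeMap<Key,Value>(new MemKeyComparator());
        Key base = new Key(new Text("row"), new Text("cf"), new Text("cq"), 0L);
        data.put(new MemKey(base, 5), new Value("committed".getBytes()));
        data.put(new MemKey(base, 7), new Value("in-flight".getBytes()));

        // Snapshot taken when kvCount 5 was the last fully applied mutation;
        // the kvCount 7 entry is invisible to this scan.
        PartialMutationSkippingIterator snap =
            new PartialMutationSkippingIterator(new SortedMapIterator(data), 5);
        snap.seek(new Range(), Collections.<ByteSequence>emptySet(), false);

        while (snap.hasTop()) {
          System.out.println(new String(snap.getTopValue().get())); // committed
          snap.next();
        }
      }
    }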

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
index 668b9cc..fb3c8a0 100644
--- a/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
+++ b/test/src/main/java/org/apache/accumulo/test/EstimateInMemMapOverhead.java
@@ -16,323 +16,6 @@
  */
 package org.apache.accumulo.test;
 
-import java.util.Collections;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.tserver.InMemoryMap;
-import org.apache.hadoop.io.Text;
-
-abstract class MemoryUsageTest {
-  abstract void addEntry(int i);
-
-  abstract int getEstimatedBytesPerEntry();
-
-  abstract void clear();
-
-  abstract int getNumPasses();
-
-  abstract String getName();
-
-  abstract void init();
-
-  public void run() {
-    System.gc();
-    long usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
-    int count = 0;
-    while (usedMem > 1024 * 1024 && count < 10) {
-      System.gc();
-      usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
-      count++;
-    }
-
-    init();
-
-    for (int i = 0; i < getNumPasses(); i++) {
-      addEntry(i);
-    }
-
-    System.gc();
-
-    long memSize = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) - usedMem;
-
-    double actualBytesPerEntry = memSize / (double) getNumPasses();
-    double expectedBytesPerEntry = getEstimatedBytesPerEntry();
-    double diff = actualBytesPerEntry - expectedBytesPerEntry;
-    double ratio = actualBytesPerEntry / expectedBytesPerEntry * 100;
-
-    System.out.printf("%30s | %,10d | %6.2fGB | %6.2f | %6.2f | %6.2f | %6.2f%s%n", getName(), getNumPasses(), memSize / (1024 * 1024 * 1024.0),
-        actualBytesPerEntry, expectedBytesPerEntry, diff, ratio, "%");
-
-    clear();
-
-  }
-
-}
-
-class TextMemoryUsageTest extends MemoryUsageTest {
-
-  private int keyLen;
-  private int colFamLen;
-  private int colQualLen;
-  private int dataLen;
-  private TreeMap<Text,Value> map;
-  private int passes;
-
-  TextMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
-    this.keyLen = keyLen;
-    this.colFamLen = colFamLen;
-    this.colQualLen = colQualLen;
-    this.dataLen = dataLen;
-    this.passes = passes;
-
-  }
-
-  @Override
-  void init() {
-    map = new TreeMap<Text,Value>();
-  }
-
-  @Override
-  public void addEntry(int i) {
-    Text key = new Text(String.format("%0" + keyLen + "d:%0" + colFamLen + "d:%0" + colQualLen + "d", i, 0, 0).getBytes());
-    //
-    byte data[] = new byte[dataLen];
-    for (int j = 0; j < data.length; j++) {
-      data[j] = (byte) (j % 10 + 65);
-    }
-    Value value = new Value(data);
-
-    map.put(key, value);
-
-  }
-
-  @Override
-  public void clear() {
-    map.clear();
-    map = null;
-  }
-
-  @Override
-  public int getEstimatedBytesPerEntry() {
-    return keyLen + colFamLen + colQualLen + dataLen;
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-  @Override
-  String getName() {
-    return "Text " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
-  }
-
-}
-
-class InMemoryMapMemoryUsageTest extends MemoryUsageTest {
-
-  private int keyLen;
-  private int colFamLen;
-  private int colQualLen;
-  private int colVisLen;
-  private int dataLen;
-
-  private InMemoryMap imm;
-  private Text key;
-  private Text colf;
-  private Text colq;
-  private ColumnVisibility colv;
-  private int passes;
-
-  InMemoryMapMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int colVisLen, int dataLen) {
-    this.keyLen = keyLen;
-    this.colFamLen = colFamLen;
-    this.colQualLen = colQualLen;
-    this.dataLen = dataLen;
-    this.passes = passes;
-    this.colVisLen = colVisLen;
-
-  }
-
-  @Override
-  void init() {
-    imm = new InMemoryMap(false, "/tmp");
-    key = new Text();
-
-    colf = new Text(String.format("%0" + colFamLen + "d", 0));
-    colq = new Text(String.format("%0" + colQualLen + "d", 0));
-    colv = new ColumnVisibility(String.format("%0" + colVisLen + "d", 0));
-  }
-
-  @Override
-  public void addEntry(int i) {
-    key.set(String.format("%0" + keyLen + "d", i));
-
-    Mutation m = new Mutation(key);
-
-    byte data[] = new byte[dataLen];
-    for (int j = 0; j < data.length; j++) {
-      data[j] = (byte) (j % 10 + 65);
-    }
-    Value idata = new Value(data);
-
-    m.put(colf, colq, colv, idata);
-
-    imm.mutate(Collections.singletonList(m));
-
-  }
-
-  @Override
-  public int getEstimatedBytesPerEntry() {
-    return keyLen + colFamLen + colQualLen + dataLen + 4 + colVisLen;
-  }
-
-  @Override
-  public void clear() {
-    imm = null;
-    key = null;
-    colf = null;
-    colq = null;
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-  @Override
-  String getName() {
-    return "IMM " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
-  }
-}
-
-class MutationMemoryUsageTest extends MemoryUsageTest {
-
-  private int keyLen;
-  private int colFamLen;
-  private int colQualLen;
-  private int dataLen;
-
-  private Mutation[] mutations;
-  private Text key;
-  private Text colf;
-  private Text colq;
-  private int passes;
-
-  MutationMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
-    this.keyLen = keyLen;
-    this.colFamLen = colFamLen;
-    this.colQualLen = colQualLen;
-    this.dataLen = dataLen;
-    this.passes = passes;
-    mutations = new Mutation[passes];
-
-  }
-
-  @Override
-  void init() {
-    key = new Text();
-
-    colf = new Text(String.format("%0" + colFamLen + "d", 0));
-    colq = new Text(String.format("%0" + colQualLen + "d", 0));
-
-    byte data[] = new byte[dataLen];
-    for (int i = 0; i < data.length; i++) {
-      data[i] = (byte) (i % 10 + 65);
-    }
-  }
-
-  @Override
-  public void addEntry(int i) {
-    key.set(String.format("%0" + keyLen + "d", i));
-
-    Mutation m = new Mutation(key);
-
-    byte data[] = new byte[dataLen];
-    for (int j = 0; j < data.length; j++) {
-      data[j] = (byte) (j % 10 + 65);
-    }
-    Value idata = new Value(data);
-
-    m.put(colf, colq, idata);
-
-    mutations[i] = m;
-  }
-
-  @Override
-  public int getEstimatedBytesPerEntry() {
-    return keyLen + colFamLen + colQualLen + dataLen;
-  }
-
-  @Override
-  public void clear() {
-    key = null;
-    colf = null;
-    colq = null;
-    mutations = null;
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-  @Override
-  String getName() {
-    return "Mutation " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
-  }
-}
-
-class IntObjectMemoryUsageTest extends MemoryUsageTest {
-
-  private int passes;
-  private Object data[];
-
-  static class SimpleObject {
-    int d;
-
-    SimpleObject(int d) {
-      this.d = d;
-    }
-  }
-
-  IntObjectMemoryUsageTest(int numPasses) {
-    this.passes = numPasses;
-  }
-
-  @Override
-  void init() {
-    data = new Object[passes];
-  }
-
-  @Override
-  void addEntry(int i) {
-    data[i] = new SimpleObject(i);
-
-  }
-
-  @Override
-  void clear() {}
-
-  @Override
-  int getEstimatedBytesPerEntry() {
-    return 4;
-  }
-
-  @Override
-  String getName() {
-    return "int obj";
-  }
-
-  @Override
-  int getNumPasses() {
-    return passes;
-  }
-
-}
 
 public class EstimateInMemMapOverhead {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java
new file mode 100644
index 0000000..f325524
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/InMemoryMapMemoryUsageTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.tserver.InMemoryMap;
+import org.apache.hadoop.io.Text;
+
+class InMemoryMapMemoryUsageTest extends MemoryUsageTest {
+
+  private int keyLen;
+  private int colFamLen;
+  private int colQualLen;
+  private int colVisLen;
+  private int dataLen;
+
+  private InMemoryMap imm;
+  private Text key;
+  private Text colf;
+  private Text colq;
+  private ColumnVisibility colv;
+  private int passes;
+
+  InMemoryMapMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int colVisLen, int dataLen) {
+    this.keyLen = keyLen;
+    this.colFamLen = colFamLen;
+    this.colQualLen = colQualLen;
+    this.dataLen = dataLen;
+    this.passes = passes;
+    this.colVisLen = colVisLen;
+
+  }
+
+  @Override
+  void init() {
+    imm = new InMemoryMap(false, "/tmp");
+    key = new Text();
+
+    colf = new Text(String.format("%0" + colFamLen + "d", 0));
+    colq = new Text(String.format("%0" + colQualLen + "d", 0));
+    colv = new ColumnVisibility(String.format("%0" + colVisLen + "d", 0));
+  }
+
+  @Override
+  public void addEntry(int i) {
+    key.set(String.format("%0" + keyLen + "d", i));
+
+    Mutation m = new Mutation(key);
+
+    byte data[] = new byte[dataLen];
+    for (int j = 0; j < data.length; j++) {
+      data[j] = (byte) (j % 10 + 65);
+    }
+    Value idata = new Value(data);
+
+    m.put(colf, colq, colv, idata);
+
+    imm.mutate(Collections.singletonList(m));
+
+  }
+
+  @Override
+  public int getEstimatedBytesPerEntry() {
+    return keyLen + colFamLen + colQualLen + dataLen + 4 + colVisLen;
+  }
+
+  @Override
+  public void clear() {
+    imm = null;
+    key = null;
+    colf = null;
+    colq = null;
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+  @Override
+  String getName() {
+    return "IMM " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java
new file mode 100644
index 0000000..d83421a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/IntObjectMemoryUsageTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+class IntObjectMemoryUsageTest extends MemoryUsageTest {
+
+  private int passes;
+  private Object data[];
+
+  static class SimpleObject {
+    int d;
+
+    SimpleObject(int d) {
+      this.d = d;
+    }
+  }
+
+  IntObjectMemoryUsageTest(int numPasses) {
+    this.passes = numPasses;
+  }
+
+  @Override
+  void init() {
+    data = new Object[passes];
+  }
+
+  @Override
+  void addEntry(int i) {
+    data[i] = new SimpleObject(i);
+
+  }
+
+  @Override
+  void clear() {}
+
+  @Override
+  int getEstimatedBytesPerEntry() {
+    return 4;
+  }
+
+  @Override
+  String getName() {
+    return "int obj";
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java
new file mode 100644
index 0000000..39e8d68
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MemoryUsageTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+abstract class MemoryUsageTest {
+  abstract void addEntry(int i);
+
+  abstract int getEstimatedBytesPerEntry();
+
+  abstract void clear();
+
+  abstract int getNumPasses();
+
+  abstract String getName();
+
+  abstract void init();
+
+  public void run() {
+    System.gc();
+    long usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
+    int count = 0;
+    while (usedMem > 1024 * 1024 && count < 10) {
+      System.gc();
+      usedMem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
+      count++;
+    }
+
+    init();
+
+    for (int i = 0; i < getNumPasses(); i++) {
+      addEntry(i);
+    }
+
+    System.gc();
+
+    long memSize = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) - usedMem;
+
+    double actualBytesPerEntry = memSize / (double) getNumPasses();
+    double expectedBytesPerEntry = getEstimatedBytesPerEntry();
+    double diff = actualBytesPerEntry - expectedBytesPerEntry;
+    double ratio = actualBytesPerEntry / expectedBytesPerEntry * 100;
+
+    System.out.printf("%30s | %,10d | %6.2fGB | %6.2f | %6.2f | %6.2f | %6.2f%s%n", getName(), getNumPasses(), memSize / (1024 * 1024 * 1024.0),
+        actualBytesPerEntry, expectedBytesPerEntry, diff, ratio, "%");
+
+    clear();
+
+  }
+
+}
\ No newline at end of file
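
run() measures real heap growth around the insert loop (forcing GC before and after) and compares it with the test's own estimate, printing the test name, pass count, total size in GB, actual and expected bytes per entry, their difference, and the actual/expected ratio as a percentage. A hedged driver sketch in the style of EstimateInMemMapOverhead (the demo class name is hypothetical; the two constructors are taken from the files above):

    package org.apache.accumulo.test;

    // Hypothetical driver, not part of the commit.
    public class MemoryUsageDemo {
      public static void main(String[] args) {
        // Ints boxed in objects: actual bytes/entry will exceed the 4-byte
        // estimate by the object header and array-slot overhead.
        new IntObjectMemoryUsageTest(1000000).run();
        // Text keys formatted as key:colfam:colqual with a 100-byte value.
        new TextMemoryUsageTest(100000, 50, 10, 10, 100).run();
      }
    }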

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java
new file mode 100644
index 0000000..011fbfe
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MutationMemoryUsageTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+
+class MutationMemoryUsageTest extends MemoryUsageTest {
+
+  private int keyLen;
+  private int colFamLen;
+  private int colQualLen;
+  private int dataLen;
+
+  private Mutation[] mutations;
+  private Text key;
+  private Text colf;
+  private Text colq;
+  private int passes;
+
+  MutationMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
+    this.keyLen = keyLen;
+    this.colFamLen = colFamLen;
+    this.colQualLen = colQualLen;
+    this.dataLen = dataLen;
+    this.passes = passes;
+    mutations = new Mutation[passes];
+
+  }
+
+  @Override
+  void init() {
+    key = new Text();
+
+    colf = new Text(String.format("%0" + colFamLen + "d", 0));
+    colq = new Text(String.format("%0" + colQualLen + "d", 0));
+
+    byte data[] = new byte[dataLen];
+    for (int i = 0; i < data.length; i++) {
+      data[i] = (byte) (i % 10 + 65);
+    }
+  }
+
+  @Override
+  public void addEntry(int i) {
+    key.set(String.format("%0" + keyLen + "d", i));
+
+    Mutation m = new Mutation(key);
+
+    byte data[] = new byte[dataLen];
+    for (int j = 0; j < data.length; j++) {
+      data[j] = (byte) (j % 10 + 65);
+    }
+    Value idata = new Value(data);
+
+    m.put(colf, colq, idata);
+
+    mutations[i] = m;
+  }
+
+  @Override
+  public int getEstimatedBytesPerEntry() {
+    return keyLen + colFamLen + colQualLen + dataLen;
+  }
+
+  @Override
+  public void clear() {
+    key = null;
+    colf = null;
+    colq = null;
+    mutations = null;
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+  @Override
+  String getName() {
+    return "Mutation " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java b/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java
new file mode 100644
index 0000000..14b8184
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TextMemoryUsageTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+
+class TextMemoryUsageTest extends MemoryUsageTest {
+
+  private int keyLen;
+  private int colFamLen;
+  private int colQualLen;
+  private int dataLen;
+  private TreeMap<Text,Value> map;
+  private int passes;
+
+  TextMemoryUsageTest(int passes, int keyLen, int colFamLen, int colQualLen, int dataLen) {
+    this.keyLen = keyLen;
+    this.colFamLen = colFamLen;
+    this.colQualLen = colQualLen;
+    this.dataLen = dataLen;
+    this.passes = passes;
+
+  }
+
+  @Override
+  void init() {
+    map = new TreeMap<Text,Value>();
+  }
+
+  @Override
+  public void addEntry(int i) {
+    Text key = new Text(String.format("%0" + keyLen + "d:%0" + colFamLen + "d:%0" + colQualLen + "d", i, 0, 0).getBytes());
+    //
+    byte data[] = new byte[dataLen];
+    for (int j = 0; j < data.length; j++) {
+      data[j] = (byte) (j % 10 + 65);
+    }
+    Value value = new Value(data);
+
+    map.put(key, value);
+
+  }
+
+  @Override
+  public void clear() {
+    map.clear();
+    map = null;
+  }
+
+  @Override
+  public int getEstimatedBytesPerEntry() {
+    return keyLen + colFamLen + colQualLen + dataLen;
+  }
+
+  @Override
+  int getNumPasses() {
+    return passes;
+  }
+
+  @Override
+  String getName() {
+    return "Text " + keyLen + " " + colFamLen + " " + colQualLen + " " + dataLen;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java b/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java
new file mode 100644
index 0000000..f53a6a6
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/continuous/HistData.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.continuous;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+class HistData<T> implements Comparable<HistData<T>>, Serializable {
+  private static final long serialVersionUID = 1L;
+
+  T bin;
+  long count;
+
+  HistData(T bin) {
+    this.bin = bin;
+    count = 0;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hashCode(bin) + Objects.hashCode(count);
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public boolean equals(Object obj) {
+    return obj == this || (obj != null && obj instanceof HistData && 0 == compareTo((HistData<T>) obj));
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public int compareTo(HistData<T> o) {
+    return ((Comparable<T>) bin).compareTo(o.bin);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java b/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
index dd17f3d..8dd3c9d 100644
--- a/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
+++ b/test/src/main/java/org/apache/accumulo/test/continuous/Histogram.java
@@ -29,39 +29,9 @@ import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Objects;
 import java.util.Set;
 import java.util.TreeSet;
 
-class HistData<T> implements Comparable<HistData<T>>, Serializable {
-  private static final long serialVersionUID = 1L;
-
-  T bin;
-  long count;
-
-  HistData(T bin) {
-    this.bin = bin;
-    count = 0;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hashCode(bin) + Objects.hashCode(count);
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public boolean equals(Object obj) {
-    return obj == this || (obj != null && obj instanceof HistData && 0 == compareTo((HistData<T>) obj));
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public int compareTo(HistData<T> o) {
-    return ((Comparable<T>) bin).compareTo(o.bin);
-  }
-}
-
 public class Histogram<T> implements Serializable {
 
   private static final long serialVersionUID = 1L;


[8/9] accumulo git commit: ACCUMULO-3759 Fix Java 8 compiler warnings

Posted by ct...@apache.org.
ACCUMULO-3759 Fix Java 8 compiler warnings

* Add missing hashCode in class with equals
* Enforce one-type per file
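
The first bullet is about the Object contract: a class that overrides equals must also override hashCode, or two objects that compare equal can land in different buckets and silently disappear from hash-based collections (HistData above is an example of a class defining both). A minimal sketch of the contract, using a hypothetical class that is not part of this commit:

    import java.util.Objects;

    // Hypothetical: equals and hashCode derived from the same field, so any
    // two equal Bins always hash alike and HashMap/HashSet lookups work.
    final class Bin {
      final String label;

      Bin(String label) {
        this.label = label;
      }

      @Override
      public boolean equals(Object o) {
        return o instanceof Bin && Objects.equals(label, ((Bin) o).label);
      }

      @Override
      public int hashCode() {
        return Objects.hashCode(label);
      }
    }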


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/6e2e6780
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/6e2e6780
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/6e2e6780

Branch: refs/heads/master
Commit: 6e2e6780fc59c86112fba30a5211081bb6e77979
Parents: f996387
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Apr 28 20:30:22 2015 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Apr 28 20:30:22 2015 -0400

----------------------------------------------------------------------
 .../core/client/impl/OfflineIterator.java       | 340 ++++++++++++
 .../core/client/impl/OfflineScanner.java        | 314 -----------
 .../core/compaction/CompactionSettings.java     |  42 --
 .../accumulo/core/compaction/PatternType.java   |  28 +
 .../accumulo/core/compaction/SizeType.java      |  30 ++
 .../accumulo/core/compaction/StringType.java    |  24 +
 .../apache/accumulo/core/compaction/Type.java   |  21 +
 .../accumulo/core/compaction/UIntType.java      |  27 +
 .../core/file/DispatchingFileFactory.java       | 136 +++++
 .../accumulo/core/file/FileOperations.java      | 106 ----
 .../accumulo/core/cli/TestClientOpts.java       |   5 +
 .../client/CountingVerifyingReceiver.java       |  64 +++
 .../simple/client/RandomBatchScanner.java       |  38 --
 pom.xml                                         |   1 +
 .../accumulo/master/tableOps/BulkImport.java    | 363 -------------
 .../master/tableOps/CancelCompactions.java      |  23 -
 .../accumulo/master/tableOps/ChooseDir.java     |  53 ++
 .../accumulo/master/tableOps/CleanUp.java       | 287 ++++++++++
 .../master/tableOps/CleanUpBulkImport.java      |  64 +++
 .../accumulo/master/tableOps/CloneInfo.java     |  36 ++
 .../accumulo/master/tableOps/CloneMetadata.java |  54 ++
 .../master/tableOps/ClonePermissions.java       |  73 +++
 .../accumulo/master/tableOps/CloneTable.java    | 195 -------
 .../master/tableOps/CloneZookeeper.java         |  76 +++
 .../accumulo/master/tableOps/CompactRange.java  | 159 ------
 .../master/tableOps/CompactionDriver.java       | 188 +++++++
 .../master/tableOps/CompleteBulkImport.java     |  45 ++
 .../accumulo/master/tableOps/CopyFailed.java    | 158 ++++++
 .../accumulo/master/tableOps/CreateDir.java     |  51 ++
 .../master/tableOps/CreateImportDir.java        |  61 +++
 .../master/tableOps/CreateNamespace.java        | 137 -----
 .../accumulo/master/tableOps/CreateTable.java   | 251 ---------
 .../master/tableOps/DeleteNamespace.java        |  55 --
 .../accumulo/master/tableOps/DeleteTable.java   | 265 ----------
 .../accumulo/master/tableOps/ExportInfo.java    |  29 ++
 .../accumulo/master/tableOps/ExportTable.java   | 257 ---------
 .../master/tableOps/FinishCancelCompaction.java |  40 ++
 .../master/tableOps/FinishCloneTable.java       |  64 +++
 .../master/tableOps/FinishCreateNamespace.java  |  58 +++
 .../master/tableOps/FinishCreateTable.java      |  62 +++
 .../master/tableOps/FinishImportTable.java      |  68 +++
 .../tableOps/ImportPopulateZookeeper.java       | 104 ++++
 .../master/tableOps/ImportSetupPermissions.java |  65 +++
 .../accumulo/master/tableOps/ImportTable.java   | 521 -------------------
 .../master/tableOps/ImportedTableInfo.java      |  31 ++
 .../accumulo/master/tableOps/LoadFiles.java     | 209 ++++++++
 .../master/tableOps/MapImportFileNames.java     | 111 ++++
 .../master/tableOps/MoveExportedFiles.java      |  71 +++
 .../master/tableOps/NamespaceCleanUp.java       |  75 +++
 .../accumulo/master/tableOps/NamespaceInfo.java |  31 ++
 .../master/tableOps/PopulateMetadata.java       |  54 ++
 .../master/tableOps/PopulateMetadataTable.java  | 217 ++++++++
 .../master/tableOps/PopulateZookeeper.java      |  77 +++
 .../PopulateZookeeperWithNamespace.java         |  74 +++
 .../tableOps/SetupNamespacePermissions.java     |  55 ++
 .../master/tableOps/SetupPermissions.java       |  63 +++
 .../accumulo/master/tableOps/TableInfo.java     |  35 ++
 .../accumulo/master/tableOps/TableRangeOp.java  |  45 --
 .../master/tableOps/TableRangeOpWait.java       |  69 +++
 .../master/tableOps/WriteExportFiles.java       | 268 ++++++++++
 .../apache/accumulo/tserver/InMemoryMap.java    | 119 -----
 .../accumulo/tserver/MemKeyComparator.java      |  44 ++
 .../tserver/MemKeyConversionIterator.java       |  96 ++++
 .../PartialMutationSkippingIterator.java        |  54 ++
 .../accumulo/test/EstimateInMemMapOverhead.java | 317 -----------
 .../test/InMemoryMapMemoryUsageTest.java        | 102 ++++
 .../accumulo/test/IntObjectMemoryUsageTest.java |  65 +++
 .../apache/accumulo/test/MemoryUsageTest.java   |  64 +++
 .../accumulo/test/MutationMemoryUsageTest.java  |  98 ++++
 .../accumulo/test/TextMemoryUsageTest.java      |  82 +++
 .../accumulo/test/continuous/HistData.java      |  49 ++
 .../accumulo/test/continuous/Histogram.java     |  30 --
 72 files changed, 4406 insertions(+), 3237 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
new file mode 100644
index 0000000..b035e3e
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.ConfigurationCopy;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Column;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyValue;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
+import org.apache.accumulo.core.iterators.system.ColumnQualifierFilter;
+import org.apache.accumulo.core.iterators.system.DeletingIterator;
+import org.apache.accumulo.core.iterators.system.MultiIterator;
+import org.apache.accumulo.core.iterators.system.VisibilityFilter;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.LocalityGroupUtil;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.volume.VolumeConfiguration;
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
+
+class OfflineIterator implements Iterator<Entry<Key,Value>> {
+
+  static class OfflineIteratorEnvironment implements IteratorEnvironment {
+
+    private final Authorizations authorizations;
+
+    public OfflineIteratorEnvironment(Authorizations auths) {
+      this.authorizations = auths;
+    }
+
+    @Override
+    public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName) throws IOException {
+      throw new NotImplementedException();
+    }
+
+    @Override
+    public AccumuloConfiguration getConfig() {
+      return AccumuloConfiguration.getDefaultConfiguration();
+    }
+
+    @Override
+    public IteratorScope getIteratorScope() {
+      return IteratorScope.scan;
+    }
+
+    @Override
+    public boolean isFullMajorCompaction() {
+      return false;
+    }
+
+    private ArrayList<SortedKeyValueIterator<Key,Value>> topLevelIterators = new ArrayList<SortedKeyValueIterator<Key,Value>>();
+
+    @Override
+    public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
+      topLevelIterators.add(iter);
+    }
+
+    @Override
+    public Authorizations getAuthorizations() {
+      return authorizations;
+    }
+
+    SortedKeyValueIterator<Key,Value> getTopLevelIterator(SortedKeyValueIterator<Key,Value> iter) {
+      if (topLevelIterators.isEmpty())
+        return iter;
+      ArrayList<SortedKeyValueIterator<Key,Value>> allIters = new ArrayList<SortedKeyValueIterator<Key,Value>>(topLevelIterators);
+      allIters.add(iter);
+      return new MultiIterator(allIters, false);
+    }
+  }
+
+  private SortedKeyValueIterator<Key,Value> iter;
+  private Range range;
+  private KeyExtent currentExtent;
+  private Connector conn;
+  private String tableId;
+  private Authorizations authorizations;
+  private Instance instance;
+  private ScannerOptions options;
+  private ArrayList<SortedKeyValueIterator<Key,Value>> readers;
+  private AccumuloConfiguration config;
+
+  public OfflineIterator(ScannerOptions options, Instance instance, Credentials credentials, Authorizations authorizations, Text table, Range range) {
+    this.options = new ScannerOptions(options);
+    this.instance = instance;
+    this.range = range;
+
+    if (this.options.fetchedColumns.size() > 0) {
+      this.range = range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
+    }
+
+    this.tableId = table.toString();
+    this.authorizations = authorizations;
+    this.readers = new ArrayList<SortedKeyValueIterator<Key,Value>>();
+
+    try {
+      conn = instance.getConnector(credentials.getPrincipal(), credentials.getToken());
+      config = new ConfigurationCopy(conn.instanceOperations().getSiteConfiguration());
+      nextTablet();
+
+      while (iter != null && !iter.hasTop())
+        nextTablet();
+
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public boolean hasNext() {
+    return iter != null && iter.hasTop();
+  }
+
+  @Override
+  public Entry<Key,Value> next() {
+    try {
+      byte[] v = iter.getTopValue().get();
+      // copy just like tablet server does, do this before calling next
+      KeyValue ret = new KeyValue(new Key(iter.getTopKey()), Arrays.copyOf(v, v.length));
+
+      iter.next();
+
+      while (iter != null && !iter.hasTop())
+        nextTablet();
+
+      return ret;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void nextTablet() throws TableNotFoundException, AccumuloException, IOException {
+
+    Range nextRange = null;
+
+    if (currentExtent == null) {
+      Text startRow;
+
+      if (range.getStartKey() != null)
+        startRow = range.getStartKey().getRow();
+      else
+        startRow = new Text();
+
+      nextRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
+    } else {
+
+      if (currentExtent.getEndRow() == null) {
+        iter = null;
+        return;
+      }
+
+      if (range.afterEndKey(new Key(currentExtent.getEndRow()).followingKey(PartialKey.ROW))) {
+        iter = null;
+        return;
+      }
+
+      nextRange = new Range(currentExtent.getMetadataEntry(), false, null, false);
+    }
+
+    List<String> relFiles = new ArrayList<String>();
+
+    Pair<KeyExtent,String> eloc = getTabletFiles(nextRange, relFiles);
+
+    while (eloc.getSecond() != null) {
+      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
+        Tables.clearCache(instance);
+        if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
+          throw new AccumuloException("Table is online " + tableId + " cannot scan tablet in offline mode " + eloc.getFirst());
+        }
+      }
+
+      UtilWaitThread.sleep(250);
+
+      eloc = getTabletFiles(nextRange, relFiles);
+    }
+
+    KeyExtent extent = eloc.getFirst();
+
+    if (!extent.getTableId().toString().equals(tableId)) {
+      throw new AccumuloException(" did not find tablets for table " + tableId + " " + extent);
+    }
+
+    if (currentExtent != null && !extent.isPreviousExtent(currentExtent))
+      throw new AccumuloException(" " + currentExtent + " is not previous extent " + extent);
+
+    // Old property is only used to resolve relative paths into absolute paths. For systems upgraded
+    // with relative paths, it's assumed that correct instance.dfs.{uri,dir} is still correct in the configuration
+    @SuppressWarnings("deprecation")
+    String tablesDir = config.get(Property.INSTANCE_DFS_DIR) + Constants.HDFS_TABLES_DIR;
+
+    List<String> absFiles = new ArrayList<String>();
+    for (String relPath : relFiles) {
+      if (relPath.contains(":")) {
+        absFiles.add(relPath);
+      } else {
+        // handle old-style relative paths
+        if (relPath.startsWith("..")) {
+          absFiles.add(tablesDir + relPath.substring(2));
+        } else {
+          absFiles.add(tablesDir + "/" + tableId + relPath);
+        }
+      }
+    }
+
+    iter = createIterator(extent, absFiles);
+    iter.seek(range, LocalityGroupUtil.families(options.fetchedColumns), options.fetchedColumns.size() == 0 ? false : true);
+    currentExtent = extent;
+
+  }
+
+  private Pair<KeyExtent,String> getTabletFiles(Range nextRange, List<String> relFiles) throws TableNotFoundException {
+    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    scanner.setBatchSize(100);
+    scanner.setRange(nextRange);
+
+    RowIterator rowIter = new RowIterator(scanner);
+    Iterator<Entry<Key,Value>> row = rowIter.next();
+
+    KeyExtent extent = null;
+    String location = null;
+
+    while (row.hasNext()) {
+      Entry<Key,Value> entry = row.next();
+      Key key = entry.getKey();
+
+      if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+        relFiles.add(key.getColumnQualifier().toString());
+      }
+
+      if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)
+          || key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
+        location = entry.getValue().toString();
+      }
+
+      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
+        extent = new KeyExtent(key.getRow(), entry.getValue());
+      }
+
+    }
+    return new Pair<KeyExtent,String>(extent, location);
+  }
+
+  private SortedKeyValueIterator<Key,Value> createIterator(KeyExtent extent, List<String> absFiles) throws TableNotFoundException, AccumuloException,
+      IOException {
+
+    // TODO share code w/ tablet - ACCUMULO-1303
+    AccumuloConfiguration acuTableConf = AccumuloConfiguration.getTableConfiguration(conn, tableId);
+
+    Configuration conf = CachedConfiguration.getInstance();
+
+    for (SortedKeyValueIterator<Key,Value> reader : readers) {
+      ((FileSKVIterator) reader).close();
+    }
+
+    readers.clear();
+
+    // TODO need to close files - ACCUMULO-1303
+    for (String file : absFiles) {
+      FileSystem fs = VolumeConfiguration.getVolume(file, conf, config).getFileSystem();
+      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, conf, acuTableConf, null, null);
+      readers.add(reader);
+    }
+
+    MultiIterator multiIter = new MultiIterator(readers, extent);
+
+    OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations);
+
+    DeletingIterator delIter = new DeletingIterator(multiIter, false);
+
+    ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
+
+    ColumnQualifierFilter colFilter = new ColumnQualifierFilter(cfsi, new HashSet<Column>(options.fetchedColumns));
+
+    byte[] defaultSecurityLabel;
+
+    ColumnVisibility cv = new ColumnVisibility(acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
+    defaultSecurityLabel = cv.getExpression();
+
+    VisibilityFilter visFilter = new VisibilityFilter(colFilter, authorizations, defaultSecurityLabel);
+
+    return iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(IteratorScope.scan, visFilter, extent, acuTableConf, options.serverSideIteratorList,
+        options.serverSideIteratorOptions, iterEnv, false));
+  }
+
+  @Override
+  public void remove() {
+    throw new UnsupportedOperationException();
+  }
+
+}
\ No newline at end of file
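
OfflineIterator walks an offline table tablet by tablet: it reads each tablet's file list and previous-row entry from the metadata table, opens the RFiles directly, and rebuilds the server-side stack on the client (multi-iterator over the files, deleting iterator, column filters, visibility filter, then any configured scan-time iterators). It is driven by OfflineScanner, whose diff follows. A hedged usage sketch, assuming the 1.7-era constructor OfflineScanner(Instance, Credentials, String tableId, Authorizations) and a table that has already been flushed; the instance name, ZooKeeper host, and credentials are placeholders:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.impl.Credentials;
    import org.apache.accumulo.core.client.impl.OfflineScanner;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;

    // Hypothetical usage sketch, not part of the commit.
    public class OfflineScanDemo {
      public static void main(String[] args) throws Exception {
        Instance instance = new ZooKeeperInstance("instance", "zkhost:2181");
        PasswordToken token = new PasswordToken("secret");
        Connector conn = instance.getConnector("root", token);

        conn.tableOperations().offline("mytable", true); // wait until offline
        String tableId = conn.tableOperations().tableIdMap().get("mytable");

        Scanner scanner = new OfflineScanner(instance, new Credentials("root", token),
            tableId, Authorizations.EMPTY);
        for (Entry<Key,Value> e : scanner) {
          System.out.println(e.getKey() + " -> " + e.getValue());
        }
      }
    }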

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
index 2f31319..427a7cc 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
@@ -18,332 +18,18 @@ package org.apache.accumulo.core.client.impl;
 
 import static com.google.common.base.Preconditions.checkArgument;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.RowIterator;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Column;
 import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyValue;
-import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVIterator;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.IteratorUtil;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
-import org.apache.accumulo.core.iterators.system.ColumnQualifierFilter;
-import org.apache.accumulo.core.iterators.system.DeletingIterator;
-import org.apache.accumulo.core.iterators.system.MultiIterator;
-import org.apache.accumulo.core.iterators.system.VisibilityFilter;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.LocalityGroupUtil;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.volume.VolumeConfiguration;
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
 
-class OfflineIterator implements Iterator<Entry<Key,Value>> {
-
-  static class OfflineIteratorEnvironment implements IteratorEnvironment {
-
-    private final Authorizations authorizations;
-
-    public OfflineIteratorEnvironment(Authorizations auths) {
-      this.authorizations = auths;
-    }
-
-    @Override
-    public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName) throws IOException {
-      throw new NotImplementedException();
-    }
-
-    @Override
-    public AccumuloConfiguration getConfig() {
-      return AccumuloConfiguration.getDefaultConfiguration();
-    }
-
-    @Override
-    public IteratorScope getIteratorScope() {
-      return IteratorScope.scan;
-    }
-
-    @Override
-    public boolean isFullMajorCompaction() {
-      return false;
-    }
-
-    private ArrayList<SortedKeyValueIterator<Key,Value>> topLevelIterators = new ArrayList<SortedKeyValueIterator<Key,Value>>();
-
-    @Override
-    public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
-      topLevelIterators.add(iter);
-    }
-
-    @Override
-    public Authorizations getAuthorizations() {
-      return authorizations;
-    }
-
-    SortedKeyValueIterator<Key,Value> getTopLevelIterator(SortedKeyValueIterator<Key,Value> iter) {
-      if (topLevelIterators.isEmpty())
-        return iter;
-      ArrayList<SortedKeyValueIterator<Key,Value>> allIters = new ArrayList<SortedKeyValueIterator<Key,Value>>(topLevelIterators);
-      allIters.add(iter);
-      return new MultiIterator(allIters, false);
-    }
-  }
-
-  private SortedKeyValueIterator<Key,Value> iter;
-  private Range range;
-  private KeyExtent currentExtent;
-  private Connector conn;
-  private String tableId;
-  private Authorizations authorizations;
-  private Instance instance;
-  private ScannerOptions options;
-  private ArrayList<SortedKeyValueIterator<Key,Value>> readers;
-  private AccumuloConfiguration config;
-
-  public OfflineIterator(ScannerOptions options, Instance instance, Credentials credentials, Authorizations authorizations, Text table, Range range) {
-    this.options = new ScannerOptions(options);
-    this.instance = instance;
-    this.range = range;
-
-    if (this.options.fetchedColumns.size() > 0) {
-      this.range = range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
-    }
-
-    this.tableId = table.toString();
-    this.authorizations = authorizations;
-    this.readers = new ArrayList<SortedKeyValueIterator<Key,Value>>();
-
-    try {
-      conn = instance.getConnector(credentials.getPrincipal(), credentials.getToken());
-      config = new ConfigurationCopy(conn.instanceOperations().getSiteConfiguration());
-      nextTablet();
-
-      while (iter != null && !iter.hasTop())
-        nextTablet();
-
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  @Override
-  public boolean hasNext() {
-    return iter != null && iter.hasTop();
-  }
-
-  @Override
-  public Entry<Key,Value> next() {
-    try {
-      byte[] v = iter.getTopValue().get();
-      // copy just like tablet server does, do this before calling next
-      KeyValue ret = new KeyValue(new Key(iter.getTopKey()), Arrays.copyOf(v, v.length));
-
-      iter.next();
-
-      while (iter != null && !iter.hasTop())
-        nextTablet();
-
-      return ret;
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void nextTablet() throws TableNotFoundException, AccumuloException, IOException {
-
-    Range nextRange = null;
-
-    if (currentExtent == null) {
-      Text startRow;
-
-      if (range.getStartKey() != null)
-        startRow = range.getStartKey().getRow();
-      else
-        startRow = new Text();
-
-      nextRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
-    } else {
-
-      if (currentExtent.getEndRow() == null) {
-        iter = null;
-        return;
-      }
-
-      if (range.afterEndKey(new Key(currentExtent.getEndRow()).followingKey(PartialKey.ROW))) {
-        iter = null;
-        return;
-      }
-
-      nextRange = new Range(currentExtent.getMetadataEntry(), false, null, false);
-    }
-
-    List<String> relFiles = new ArrayList<String>();
-
-    Pair<KeyExtent,String> eloc = getTabletFiles(nextRange, relFiles);
-
-    while (eloc.getSecond() != null) {
-      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
-        Tables.clearCache(instance);
-        if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
-          throw new AccumuloException("Table is online " + tableId + " cannot scan tablet in offline mode " + eloc.getFirst());
-        }
-      }
-
-      UtilWaitThread.sleep(250);
-
-      eloc = getTabletFiles(nextRange, relFiles);
-    }
-
-    KeyExtent extent = eloc.getFirst();
-
-    if (!extent.getTableId().toString().equals(tableId)) {
-      throw new AccumuloException(" did not find tablets for table " + tableId + " " + extent);
-    }
-
-    if (currentExtent != null && !extent.isPreviousExtent(currentExtent))
-      throw new AccumuloException(" " + currentExtent + " is not previous extent " + extent);
-
-    // The old property is only used to resolve relative paths into absolute paths. For systems upgraded
-    // with relative paths, it's assumed that the instance.dfs.{uri,dir} settings in the configuration are still correct
-    @SuppressWarnings("deprecation")
-    String tablesDir = config.get(Property.INSTANCE_DFS_DIR) + Constants.HDFS_TABLES_DIR;
-
-    List<String> absFiles = new ArrayList<String>();
-    for (String relPath : relFiles) {
-      if (relPath.contains(":")) {
-        absFiles.add(relPath);
-      } else {
-        // handle old-style relative paths
-        if (relPath.startsWith("..")) {
-          absFiles.add(tablesDir + relPath.substring(2));
-        } else {
-          absFiles.add(tablesDir + "/" + tableId + relPath);
-        }
-      }
-    }
-
-    iter = createIterator(extent, absFiles);
-    iter.seek(range, LocalityGroupUtil.families(options.fetchedColumns), options.fetchedColumns.size() == 0 ? false : true);
-    currentExtent = extent;
-
-  }
-
-  private Pair<KeyExtent,String> getTabletFiles(Range nextRange, List<String> relFiles) throws TableNotFoundException {
-    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    scanner.setBatchSize(100);
-    scanner.setRange(nextRange);
-
-    RowIterator rowIter = new RowIterator(scanner);
-    Iterator<Entry<Key,Value>> row = rowIter.next();
-
-    KeyExtent extent = null;
-    String location = null;
-
-    while (row.hasNext()) {
-      Entry<Key,Value> entry = row.next();
-      Key key = entry.getKey();
-
-      if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-        relFiles.add(key.getColumnQualifier().toString());
-      }
-
-      if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)
-          || key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
-        location = entry.getValue().toString();
-      }
-
-      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
-        extent = new KeyExtent(key.getRow(), entry.getValue());
-      }
-
-    }
-    return new Pair<KeyExtent,String>(extent, location);
-  }
-
-  private SortedKeyValueIterator<Key,Value> createIterator(KeyExtent extent, List<String> absFiles) throws TableNotFoundException, AccumuloException,
-      IOException {
-
-    // TODO share code w/ tablet - ACCUMULO-1303
-    AccumuloConfiguration acuTableConf = AccumuloConfiguration.getTableConfiguration(conn, tableId);
-
-    Configuration conf = CachedConfiguration.getInstance();
-
-    for (SortedKeyValueIterator<Key,Value> reader : readers) {
-      ((FileSKVIterator) reader).close();
-    }
-
-    readers.clear();
-
-    // TODO need to close files - ACCUMULO-1303
-    for (String file : absFiles) {
-      FileSystem fs = VolumeConfiguration.getVolume(file, conf, config).getFileSystem();
-      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, conf, acuTableConf, null, null);
-      readers.add(reader);
-    }
-
-    MultiIterator multiIter = new MultiIterator(readers, extent);
-
-    OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations);
-
-    DeletingIterator delIter = new DeletingIterator(multiIter, false);
-
-    ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
-
-    ColumnQualifierFilter colFilter = new ColumnQualifierFilter(cfsi, new HashSet<Column>(options.fetchedColumns));
-
-    byte[] defaultSecurityLabel;
-
-    ColumnVisibility cv = new ColumnVisibility(acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
-    defaultSecurityLabel = cv.getExpression();
-
-    VisibilityFilter visFilter = new VisibilityFilter(colFilter, authorizations, defaultSecurityLabel);
-
-    return iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(IteratorScope.scan, visFilter, extent, acuTableConf, options.serverSideIteratorList,
-        options.serverSideIteratorOptions, iterEnv, false));
-  }
-
-  @Override
-  public void remove() {
-    throw new UnsupportedOperationException();
-  }
-
-}
-
-/**
- *
- */
 public class OfflineScanner extends ScannerOptions implements Scanner {
 
   private int batchSize;

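A note on the value copy in OfflineIterator.next() above: a
SortedKeyValueIterator may reuse the buffers backing getTopKey() and
getTopValue() once next() is called, so the current entry must be deep-copied
first. A minimal sketch of the copy-then-advance pattern (helper name is
hypothetical; the Key copy constructor and Arrays.copyOf are taken from the
code above, and Value's byte[] constructor is assumed):

    import java.io.IOException;
    import java.util.AbstractMap.SimpleImmutableEntry;
    import java.util.Arrays;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

    class CopyThenAdvance {
      // Snapshot the top entry before advancing; after next() the iterator
      // may overwrite the key/value it previously handed out.
      static Entry<Key,Value> copyTop(SortedKeyValueIterator<Key,Value> iter) throws IOException {
        byte[] v = iter.getTopValue().get();
        Entry<Key,Value> snapshot = new SimpleImmutableEntry<Key,Value>(new Key(iter.getTopKey()), new Value(Arrays.copyOf(v, v.length)));
        iter.next();
        return snapshot;
      }
    }
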
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java b/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
index a45a692..43f8c0f 100644
--- a/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
@@ -18,48 +18,6 @@
 package org.apache.accumulo.core.compaction;
 
 import java.util.Map;
-import java.util.regex.Pattern;
-
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-
-import com.google.common.base.Preconditions;
-
-interface Type {
-  String convert(String str);
-}
-
-class SizeType implements Type {
-  @Override
-  public String convert(String str) {
-    long size = AccumuloConfiguration.getMemoryInBytes(str);
-    Preconditions.checkArgument(size > 0);
-    return Long.toString(size);
-  }
-}
-
-class PatternType implements Type {
-  @Override
-  public String convert(String str) {
-    // ensure it compiles
-    Pattern.compile(str);
-    return str;
-  }
-}
-
-class UIntType implements Type {
-  @Override
-  public String convert(String str) {
-    Preconditions.checkArgument(Integer.parseInt(str) > 0);
-    return str;
-  }
-}
-
-class StringType implements Type {
-  @Override
-  public String convert(String str) {
-    return str;
-  }
-}
 
 public enum CompactionSettings {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java b/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java
new file mode 100644
index 0000000..c52dcb4
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+import java.util.regex.Pattern;
+
+class PatternType implements Type {
+  @Override
+  public String convert(String str) {
+    // ensure it compiles
+    Pattern.compile(str);
+    return str;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java b/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java
new file mode 100644
index 0000000..c2af401
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+
+import com.google.common.base.Preconditions;
+
+class SizeType implements Type {
+  @Override
+  public String convert(String str) {
+    long size = AccumuloConfiguration.getMemoryInBytes(str);
+    Preconditions.checkArgument(size > 0);
+    return Long.toString(size);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java b/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java
new file mode 100644
index 0000000..7098a5c
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+class StringType implements Type {
+  @Override
+  public String convert(String str) {
+    return str;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/Type.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/Type.java b/core/src/main/java/org/apache/accumulo/core/compaction/Type.java
new file mode 100644
index 0000000..d8f81a6
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/Type.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+interface Type {
+  String convert(String str);
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java b/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java
new file mode 100644
index 0000000..c8880fc
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+import com.google.common.base.Preconditions;
+
+class UIntType implements Type {
+  @Override
+  public String convert(String str) {
+    Preconditions.checkArgument(Integer.parseInt(str) > 0);
+    return str;
+  }
+}
\ No newline at end of file

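Taken together, these four converters give each compaction setting a way to
validate and normalize a user-supplied string before it is stored. A minimal
sketch of their behavior (illustrative values only; the types are
package-private, so a caller like this would have to live in
org.apache.accumulo.core.compaction):

    package org.apache.accumulo.core.compaction;

    class TypeDemo {
      public static void main(String[] args) {
        // SizeType normalizes a memory spec to a byte count
        String bytes = new SizeType().convert("32M");      // "33554432"
        // PatternType only proves the regex compiles, returning it unchanged
        String regex = new PatternType().convert(".*[.]rf");
        // UIntType rejects anything that is not a positive integer
        String count = new UIntType().convert("10");
        System.out.println(bytes + " " + regex + " " + count);
      }
    }
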
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
new file mode 100644
index 0000000..128a931
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.file;
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.file.blockfile.cache.BlockCache;
+import org.apache.accumulo.core.file.map.MapFileOperations;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.file.rfile.RFileOperations;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+class DispatchingFileFactory extends FileOperations {
+
+  private FileOperations findFileFactory(String file) {
+
+    Path p = new Path(file);
+    String name = p.getName();
+
+    if (name.startsWith(Constants.MAPFILE_EXTENSION + "_")) {
+      return new MapFileOperations();
+    }
+    String[] sp = name.split("\\.");
+
+    if (sp.length < 2) {
+      throw new IllegalArgumentException("File name " + name + " has no extension");
+    }
+
+    String extension = sp[sp.length - 1];
+
+    if (extension.equals(Constants.MAPFILE_EXTENSION) || extension.equals(Constants.MAPFILE_EXTENSION + "_tmp")) {
+      return new MapFileOperations();
+    } else if (extension.equals(RFile.EXTENSION) || extension.equals(RFile.EXTENSION + "_tmp")) {
+      return new RFileOperations();
+    } else {
+      throw new IllegalArgumentException("File type " + extension + " not supported");
+    }
+  }
+
+  @Override
+  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    return findFileFactory(file).openIndex(file, fs, conf, acuconf, null, null);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, null, null);
+    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
+      return new BloomFilterLayer.Reader(iter, acuconf);
+    }
+    return iter;
+  }
+
+  @Override
+  public FileSKVWriter openWriter(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    FileSKVWriter writer = findFileFactory(file).openWriter(file, fs, conf, acuconf);
+    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
+      return new BloomFilterLayer.Writer(writer, acuconf);
+    }
+    return writer;
+  }
+
+  @Override
+  public long getFileSize(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    return findFileFactory(file).getFileSize(file, fs, conf, acuconf);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
+      AccumuloConfiguration tableConf) throws IOException {
+    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, null, null);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
+      AccumuloConfiguration tableConf, BlockCache dataCache, BlockCache indexCache) throws IOException {
+
+    if (!tableConf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
+      indexCache = null;
+    if (!tableConf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
+      dataCache = null;
+
+    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, dataCache, indexCache);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf,
+      BlockCache dataCache, BlockCache indexCache) throws IOException {
+
+    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
+      indexCache = null;
+    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
+      dataCache = null;
+
+    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, dataCache, indexCache);
+    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
+      return new BloomFilterLayer.Reader(iter, acuconf);
+    }
+    return iter;
+  }
+
+  @Override
+  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf, BlockCache dCache, BlockCache iCache)
+      throws IOException {
+
+    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
+      iCache = null;
+    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
+      dCache = null;
+
+    return findFileFactory(file).openIndex(file, fs, conf, acuconf, dCache, iCache);
+  }
+
+}
\ No newline at end of file

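For orientation, FileOperations.getInstance() is how callers reach this
dispatching factory (the OfflineIterator removed earlier calls it the same
way): the factory inspects the file name's extension and routes to
RFileOperations or MapFileOperations. A hedged usage sketch, with an invented
file path:

    import java.io.IOException;

    import org.apache.accumulo.core.conf.AccumuloConfiguration;
    import org.apache.accumulo.core.file.FileOperations;
    import org.apache.accumulo.core.file.FileSKVIterator;
    import org.apache.accumulo.core.util.CachedConfiguration;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    class OpenReaderSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = CachedConfiguration.getInstance();
        FileSystem fs = FileSystem.get(conf);
        AccumuloConfiguration acuConf = AccumuloConfiguration.getDefaultConfiguration();
        // the ".rf" extension routes this call to RFileOperations
        FileSKVIterator reader = FileOperations.getInstance().openReader("/accumulo/tables/1/default_tablet/F0000000.rf", false, fs, conf, acuConf);
        try {
          // seek and iterate here
        } finally {
          reader.close();
        }
      }
    }
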
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 78d0407..3798453 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -27,115 +27,9 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.file.blockfile.cache.BlockCache;
-import org.apache.accumulo.core.file.map.MapFileOperations;
 import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-class DispatchingFileFactory extends FileOperations {
-
-  private FileOperations findFileFactory(String file) {
-
-    Path p = new Path(file);
-    String name = p.getName();
-
-    if (name.startsWith(Constants.MAPFILE_EXTENSION + "_")) {
-      return new MapFileOperations();
-    }
-    String[] sp = name.split("\\.");
-
-    if (sp.length < 2) {
-      throw new IllegalArgumentException("File name " + name + " has no extension");
-    }
-
-    String extension = sp[sp.length - 1];
-
-    if (extension.equals(Constants.MAPFILE_EXTENSION) || extension.equals(Constants.MAPFILE_EXTENSION + "_tmp")) {
-      return new MapFileOperations();
-    } else if (extension.equals(RFile.EXTENSION) || extension.equals(RFile.EXTENSION + "_tmp")) {
-      return new RFileOperations();
-    } else {
-      throw new IllegalArgumentException("File type " + extension + " not supported");
-    }
-  }
-
-  @Override
-  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    return findFileFactory(file).openIndex(file, fs, conf, acuconf, null, null);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, null, null);
-    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
-      return new BloomFilterLayer.Reader(iter, acuconf);
-    }
-    return iter;
-  }
-
-  @Override
-  public FileSKVWriter openWriter(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    FileSKVWriter writer = findFileFactory(file).openWriter(file, fs, conf, acuconf);
-    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
-      return new BloomFilterLayer.Writer(writer, acuconf);
-    }
-    return writer;
-  }
-
-  @Override
-  public long getFileSize(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    return findFileFactory(file).getFileSize(file, fs, conf, acuconf);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
-      AccumuloConfiguration tableConf) throws IOException {
-    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, null, null);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
-      AccumuloConfiguration tableConf, BlockCache dataCache, BlockCache indexCache) throws IOException {
-
-    if (!tableConf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
-      indexCache = null;
-    if (!tableConf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
-      dataCache = null;
-
-    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, dataCache, indexCache);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf,
-      BlockCache dataCache, BlockCache indexCache) throws IOException {
-
-    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
-      indexCache = null;
-    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
-      dataCache = null;
-
-    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, dataCache, indexCache);
-    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
-      return new BloomFilterLayer.Reader(iter, acuconf);
-    }
-    return iter;
-  }
-
-  @Override
-  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf, BlockCache dCache, BlockCache iCache)
-      throws IOException {
-
-    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
-      iCache = null;
-    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
-      dCache = null;
-
-    return findFileFactory(file).openIndex(file, fs, conf, acuconf, dCache, iCache);
-  }
-
-}
 
 public abstract class FileOperations {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java b/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
index f0fdcca..65df5c9 100644
--- a/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
+++ b/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
@@ -263,5 +263,10 @@ public class TestClientOpts {
     public boolean equals(Object o) {
       return o instanceof EmptyToken;
     }
+
+    @Override
+    public int hashCode() {
+      return 0;
+    }
   }
 }

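The added hashCode() is more than warning suppression: Object's contract
requires equal objects to report equal hash codes, or hash-based collections
silently misbehave. A self-contained illustration (stand-in class, not the
test token itself):

    import java.util.HashSet;
    import java.util.Set;

    class AlwaysEqual {
      @Override
      public boolean equals(Object o) {
        return o instanceof AlwaysEqual;
      }

      @Override
      public int hashCode() {
        return 0; // all mutually-equal instances must share one hash code
      }

      public static void main(String[] args) {
        Set<AlwaysEqual> set = new HashSet<AlwaysEqual>();
        set.add(new AlwaysEqual());
        // prints true; with the default identity hashCode() the lookup
        // would usually probe the wrong bucket and print false
        System.out.println(set.contains(new AlwaysEqual()));
      }
    }
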
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
new file mode 100644
index 0000000..873f886
--- /dev/null
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.simple.client;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Arrays;
+import java.util.HashMap;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Internal class used to verify validity of data read.
+ */
+class CountingVerifyingReceiver {
+  private static final Logger log = LoggerFactory.getLogger(CountingVerifyingReceiver.class);
+
+  long count = 0;
+  int expectedValueSize = 0;
+  HashMap<Text,Boolean> expectedRows;
+
+  CountingVerifyingReceiver(HashMap<Text,Boolean> expectedRows, int expectedValueSize) {
+    this.expectedRows = expectedRows;
+    this.expectedValueSize = expectedValueSize;
+  }
+
+  public void receive(Key key, Value value) {
+
+    String row = key.getRow().toString();
+    long rowid = Integer.parseInt(row.split("_")[1]);
+
+    byte expectedValue[] = RandomBatchWriter.createValue(rowid, expectedValueSize);
+
+    if (!Arrays.equals(expectedValue, value.get())) {
+      log.error("Got unexpected value for " + key + " expected : " + new String(expectedValue, UTF_8) + " got : " + new String(value.get(), UTF_8));
+    }
+
+    if (!expectedRows.containsKey(key.getRow())) {
+      log.error("Got unexpected key " + key);
+    } else {
+      expectedRows.put(key.getRow(), true);
+    }
+
+    count++;
+  }
+}
\ No newline at end of file

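A hedged sketch of how this receiver is driven (method fragment; the scanner
and the expectedRows map are assumed to be prepared by the caller, as
RandomBatchScanner does):

    // Feed every scanned entry to the receiver; afterwards, any key still
    // mapped to false in expectedRows was expected but never seen.
    static long verify(Iterable<Entry<Key,Value>> scanner, HashMap<Text,Boolean> expectedRows, int expectedValueSize) {
      CountingVerifyingReceiver receiver = new CountingVerifyingReceiver(expectedRows, expectedValueSize);
      for (Entry<Key,Value> entry : scanner)
        receiver.receive(entry.getKey(), entry.getValue());
      return receiver.count;
    }
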
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
index 6f8b485..a43b97d 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
@@ -16,10 +16,8 @@
  */
 package org.apache.accumulo.examples.simple.client;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.accumulo.examples.simple.client.RandomBatchWriter.abs;
 
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map.Entry;
@@ -43,42 +41,6 @@ import org.slf4j.LoggerFactory;
 import com.beust.jcommander.Parameter;
 
 /**
- * Internal class used to verify validity of data read.
- */
-class CountingVerifyingReceiver {
-  private static final Logger log = LoggerFactory.getLogger(CountingVerifyingReceiver.class);
-
-  long count = 0;
-  int expectedValueSize = 0;
-  HashMap<Text,Boolean> expectedRows;
-
-  CountingVerifyingReceiver(HashMap<Text,Boolean> expectedRows, int expectedValueSize) {
-    this.expectedRows = expectedRows;
-    this.expectedValueSize = expectedValueSize;
-  }
-
-  public void receive(Key key, Value value) {
-
-    String row = key.getRow().toString();
-    long rowid = Integer.parseInt(row.split("_")[1]);
-
-    byte expectedValue[] = RandomBatchWriter.createValue(rowid, expectedValueSize);
-
-    if (!Arrays.equals(expectedValue, value.get())) {
-      log.error("Got unexpected value for " + key + " expected : " + new String(expectedValue, UTF_8) + " got : " + new String(value.get(), UTF_8));
-    }
-
-    if (!expectedRows.containsKey(key.getRow())) {
-      log.error("Got unexpected key " + key);
-    } else {
-      expectedRows.put(key.getRow(), true);
-    }
-
-    count++;
-  }
-}
-
-/**
  * Simple example for reading random batches of data from Accumulo. See docs/examples/README.batch for instructions.
  */
 public class RandomBatchScanner {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0bcc689..f680f84 100644
--- a/pom.xml
+++ b/pom.xml
@@ -946,6 +946,7 @@
                 <property name="eachLine" value="true" />
               </module>
               <module name="TreeWalker">
+                <module name="OneTopLevelClass" />
                 <module name="RegexpSinglelineJava">
                   <property name="format" value="\s+$" />
                   <property name="message" value="Line has trailing whitespace." />

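This one-line checkstyle addition is what motivates the file moves throughout
this commit: the OneTopLevelClass module fails the build for any .java file
that declares more than one top-level type. For example, the pre-commit shape
of RandomBatchScanner.java would now be rejected:

    // RandomBatchScanner.java -- layout before this commit
    class CountingVerifyingReceiver { /* ... */ }  // flagged: second top-level type

    public class RandomBatchScanner { /* ... */ }
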
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
index 7f83988..031a80c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
@@ -16,71 +16,34 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Map.Entry;
-import java.util.Set;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
-import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IsolatedScanner;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ServerClient;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.ClientService;
-import org.apache.accumulo.core.client.impl.thrift.ClientService.Client;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
-import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.Text;
-import org.apache.htrace.wrappers.TraceExecutorService;
-import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -302,329 +265,3 @@ public class BulkImport extends MasterRepo {
     Utils.getReadLock(tableId, tid).unlock();
   }
 }
-
-class CleanUpBulkImport extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private static final Logger log = LoggerFactory.getLogger(CleanUpBulkImport.class);
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String error;
-
-  public CleanUpBulkImport(String tableId, String source, String bulk, String error) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.error = error;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    log.debug("removing the bulk processing flag file in " + bulk);
-    Path bulkDir = new Path(bulk);
-    MetadataTableUtil.removeBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
-    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
-    log.debug("removing the metadata table markers for loaded files");
-    Connector conn = master.getConnector();
-    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
-    log.debug("releasing HDFS reservations for " + source + " and " + error);
-    Utils.unreserveHdfsDirectory(source, tid);
-    Utils.unreserveHdfsDirectory(error, tid);
-    Utils.getReadLock(tableId, tid).unlock();
-    log.debug("completing bulk import transaction " + tid);
-    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
-    return null;
-  }
-}
-
-class CompleteBulkImport extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String error;
-
-  public CompleteBulkImport(String tableId, String source, String bulk, String error) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.error = error;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
-    return new CopyFailed(tableId, source, bulk, error);
-  }
-}
-
-class CopyFailed extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String error;
-
-  public CopyFailed(String tableId, String source, String bulk, String error) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.error = error;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    Set<TServerInstance> finished = new HashSet<TServerInstance>();
-    Set<TServerInstance> running = master.onlineTabletServers();
-    for (TServerInstance server : running) {
-      try {
-        TServerConnection client = master.getConnection(server);
-        if (client != null && !client.isActive(tid))
-          finished.add(server);
-      } catch (TException ex) {
-        log.info("Ignoring error trying to check on tid " + tid + " from server " + server + ": " + ex);
-      }
-    }
-    if (finished.containsAll(running))
-      return 0;
-    return 500;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    // This needs to execute after the arbiter is stopped
-
-    VolumeManager fs = master.getFileSystem();
-
-    if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
-      return new CleanUpBulkImport(tableId, source, bulk, error);
-
-    HashMap<FileRef,String> failures = new HashMap<FileRef,String>();
-    HashMap<FileRef,String> loadedFailures = new HashMap<FileRef,String>();
-
-    try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(error, BulkImport.FAILURES_TXT)), UTF_8))) {
-      String line = null;
-      while ((line = in.readLine()) != null) {
-        Path path = new Path(line);
-        if (!fs.exists(new Path(error, path.getName())))
-          failures.put(new FileRef(line, path), line);
-      }
-    }
-
-    /*
-     * I thought I could move files that have no file references in the table. However, it's possible a clone references a file. Therefore only move files that
-     * have no loaded markers.
-     */
-
-    // determine which failed files were loaded
-    Connector conn = master.getConnector();
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
-    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
-
-    for (Entry<Key,Value> entry : mscanner) {
-      if (Long.parseLong(entry.getValue().toString()) == tid) {
-        FileRef loadedFile = new FileRef(fs, entry.getKey());
-        String absPath = failures.remove(loadedFile);
-        if (absPath != null) {
-          loadedFailures.put(loadedFile, absPath);
-        }
-      }
-    }
-
-    // move failed files that were not loaded
-    for (String failure : failures.values()) {
-      Path orig = new Path(failure);
-      Path dest = new Path(error, orig.getName());
-      fs.rename(orig, dest);
-      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
-    }
-
-    if (loadedFailures.size() > 0) {
-      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ,
-          master.getConfiguration());
-
-      HashSet<String> workIds = new HashSet<String>();
-
-      for (String failure : loadedFailures.values()) {
-        Path orig = new Path(failure);
-        Path dest = new Path(error, orig.getName());
-
-        if (fs.exists(dest))
-          continue;
-
-        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(UTF_8));
-        workIds.add(orig.getName());
-        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
-      }
-
-      bifCopyQueue.waitUntilDone(workIds);
-    }
-
-    fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
-    return new CleanUpBulkImport(tableId, source, bulk, error);
-  }
-
-}
-
-class LoadFiles extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private static ExecutorService threadPool = null;
-  private static final Logger log = LoggerFactory.getLogger(BulkImport.class);
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String errorDir;
-  private boolean setTime;
-
-  public LoadFiles(String tableId, String source, String bulk, String errorDir, boolean setTime) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.errorDir = errorDir;
-    this.setTime = setTime;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    if (master.onlineTabletServers().size() == 0)
-      return 500;
-    return 0;
-  }
-
-  private static synchronized ExecutorService getThreadPool(Master master) {
-    if (threadPool == null) {
-      int threadPoolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
-      ThreadPoolExecutor pool = new SimpleThreadPool(threadPoolSize, "bulk import");
-      pool.allowCoreThreadTimeOut(true);
-      threadPool = new TraceExecutorService(pool);
-    }
-    return threadPool;
-  }
-
-  @Override
-  public Repo<Master> call(final long tid, final Master master) throws Exception {
-    ExecutorService executor = getThreadPool(master);
-    final AccumuloConfiguration conf = master.getConfiguration();
-    VolumeManager fs = master.getFileSystem();
-    List<FileStatus> files = new ArrayList<FileStatus>();
-    for (FileStatus entry : fs.listStatus(new Path(bulk))) {
-      files.add(entry);
-    }
-    log.debug("tid " + tid + " importing " + files.size() + " files");
-
-    Path writable = new Path(this.errorDir, ".iswritable");
-    if (!fs.createNewFile(writable)) {
-      // Maybe this is a re-try... clear the flag and try again
-      fs.delete(writable);
-      if (!fs.createNewFile(writable))
-        throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
-            "Unable to write to " + this.errorDir);
-    }
-    fs.delete(writable);
-
-    final Set<String> filesToLoad = Collections.synchronizedSet(new HashSet<String>());
-    for (FileStatus f : files)
-      filesToLoad.add(f.getPath().toString());
-
-    final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
-    for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
-      List<Future<List<String>>> results = new ArrayList<Future<List<String>>>();
-
-      if (master.onlineTabletServers().size() == 0)
-        log.warn("There are no tablet servers to process bulk import, waiting (tid = " + tid + ")");
-
-      while (master.onlineTabletServers().size() == 0) {
-        UtilWaitThread.sleep(500);
-      }
-
-      // Use the threadpool to assign files one-at-a-time to the server
-      final List<String> loaded = Collections.synchronizedList(new ArrayList<String>());
-      for (final String file : filesToLoad) {
-        results.add(executor.submit(new Callable<List<String>>() {
-          @Override
-          public List<String> call() {
-            List<String> failures = new ArrayList<String>();
-            ClientService.Client client = null;
-            String server = null;
-            try {
-              // get a connection to a random tablet server, do not prefer cached connections because
-              // this is running on the master and there are lots of connections to tablet servers
-              // serving the metadata tablets
-              long timeInMillis = master.getConfiguration().getTimeInMillis(Property.MASTER_BULK_TIMEOUT);
-              Pair<String,Client> pair = ServerClient.getConnection(master, false, timeInMillis);
-              client = pair.getSecond();
-              server = pair.getFirst();
-              List<String> attempt = Collections.singletonList(file);
-              log.debug("Asking " + pair.getFirst() + " to bulk import " + file);
-              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId, attempt, errorDir, setTime);
-              if (fail.isEmpty()) {
-                loaded.add(file);
-              } else {
-                failures.addAll(fail);
-              }
-            } catch (Exception ex) {
-              log.error("rpc failed server:" + server + ", tid:" + tid + " " + ex);
-            } finally {
-              ServerClient.close(client);
-            }
-            return failures;
-          }
-        }));
-      }
-      Set<String> failures = new HashSet<String>();
-      for (Future<List<String>> f : results)
-        failures.addAll(f.get());
-      filesToLoad.removeAll(loaded);
-      if (filesToLoad.size() > 0) {
-        log.debug("tid " + tid + " attempt " + (attempt + 1) + " " + sampleList(filesToLoad, 10) + " failed");
-        UtilWaitThread.sleep(100);
-      }
-    }
-
-    FSDataOutputStream failFile = fs.create(new Path(errorDir, BulkImport.FAILURES_TXT), true);
-    BufferedWriter out = new BufferedWriter(new OutputStreamWriter(failFile, UTF_8));
-    try {
-      for (String f : filesToLoad) {
-        out.write(f);
-        out.write("\n");
-      }
-    } finally {
-      out.close();
-    }
-
-    // return the next step, which will perform cleanup
-    return new CompleteBulkImport(tableId, source, bulk, errorDir);
-  }
-
-  static String sampleList(Collection<?> potentiallyLongList, int max) {
-    StringBuffer result = new StringBuffer();
-    result.append("[");
-    int i = 0;
-    for (Object obj : potentiallyLongList) {
-      result.append(obj);
-      if (i >= max) {
-        result.append("...");
-        break;
-      } else {
-        result.append(", ");
-      }
-      i++;
-    }
-    if (i < max)
-      result.delete(result.length() - 2, result.length());
-    result.append("]");
-    return result.toString();
-  }
-
-}

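The classes removed above (CleanUpBulkImport, CompleteBulkImport, CopyFailed,
LoadFiles) all follow the same FATE step pattern: isReady() returns 0 when the
step may run, or a back-off in milliseconds, and call() performs the work and
returns the next step, with null ending the transaction. A minimal sketch of
such a step (class name hypothetical; imports as in the surrounding package):

    class ExampleStep extends MasterRepo {
      private static final long serialVersionUID = 1L;

      @Override
      public long isReady(long tid, Master master) throws Exception {
        // mirrors LoadFiles: back off while no tablet servers are online
        return master.onlineTabletServers().size() == 0 ? 500 : 0;
      }

      @Override
      public Repo<Master> call(long tid, Master master) throws Exception {
        // do this step's work here, then hand FATE the next step
        return null; // null: the transaction is complete
      }
    }
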
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
index 4f4b27e..e268f17 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
@@ -27,29 +27,6 @@ import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
-class FinishCancelCompaction extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-  private String tableId;
-
-  public FinishCancelCompaction(String tableId) {
-    this.tableId = tableId;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    Utils.getReadLock(tableId, tid).unlock();
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-
-  }
-}
-
-/**
- *
- */
 public class CancelCompactions extends MasterRepo {
 
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java
new file mode 100644
index 0000000..3e1aa33
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.hadoop.fs.Path;
+
+import com.google.common.base.Optional;
+
+class ChooseDir extends MasterRepo {
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  ChooseDir(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add one here
+    tableInfo.dir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+        + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
+    return new CreateDir(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+
+  }
+}
\ No newline at end of file

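For orientation, the string assembled in call() above composes a per-table
default tablet directory under whichever volume choose() selects from
ServerConstants.getBaseUris(). Illustrative only (volume and table id are
invented; the two constants resolve to "/tables" and "/default_tablet"):

    // <chosen volume> + Constants.HDFS_TABLES_DIR + "/" + tableId + Constants.DEFAULT_TABLET_LOCATION
    String dir = "hdfs://namenode:8020/accumulo" + "/tables" + "/" + "4" + "/default_tablet";
    // -> hdfs://namenode:8020/accumulo/tables/4/default_tablet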

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
index 05676e7..a1158f4 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
@@ -16,277 +16,12 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.IOException;
-import java.net.UnknownHostException;
-import java.util.Arrays;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.iterators.user.GrepIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.accumulo.server.master.state.TabletState;
-import org.apache.accumulo.server.problems.ProblemReports;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-class CleanUp extends MasterRepo {
-
-  final private static Logger log = LoggerFactory.getLogger(CleanUp.class);
-
-  private static final long serialVersionUID = 1L;
-
-  private String tableId, namespaceId;
-
-  private long creationTime;
-
-  private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
-    in.defaultReadObject();
-
-    /*
-     * Handle the case where we start executing on a new machine whose clock is behind the previous machine's clock.
-     *
-     * If the new machine's clock is ahead, that works correctly with hasCycled.
-     */
-    if (System.currentTimeMillis() < creationTime) {
-      creationTime = System.currentTimeMillis();
-    }
-
-  }
-
-  public CleanUp(String tableId, String namespaceId) {
-    this.tableId = tableId;
-    this.namespaceId = namespaceId;
-    creationTime = System.currentTimeMillis();
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    if (!master.hasCycled(creationTime)) {
-      return 50;
-    }
-
-    boolean done = true;
-    Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    Scanner scanner = master.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    MetaDataTableScanner.configureScanner(scanner, master);
-    scanner.setRange(tableRange);
-
-    for (Entry<Key,Value> entry : scanner) {
-      TabletLocationState locationState = MetaDataTableScanner.createTabletLocationState(entry.getKey(), entry.getValue());
-      TabletState state = locationState.getState(master.onlineTabletServers());
-      if (state.equals(TabletState.ASSIGNED) || state.equals(TabletState.HOSTED)) {
-        log.debug("Still waiting for table to be deleted: " + tableId + " locationState: " + locationState);
-        done = false;
-        break;
-      }
-    }
-
-    if (!done)
-      return 50;
-
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    master.clearMigrations(tableId);
-
-    int refCount = 0;
-
-    try {
-      // look for other tables that references this table's files
-      Connector conn = master.getConnector();
-      BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
-      try {
-        Range allTables = MetadataSchema.TabletsSection.getRange();
-        Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
-        Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
-        Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
-        bs.setRanges(Arrays.asList(beforeTable, afterTable));
-        bs.fetchColumnFamily(DataFileColumnFamily.NAME);
-        IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
-        GrepIterator.setTerm(cfg, "/" + tableId + "/");
-        bs.addScanIterator(cfg);
-
-        for (Entry<Key,Value> entry : bs) {
-          if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
-            refCount++;
-          }
-        }
-      } finally {
-        bs.close();
-      }
-
-    } catch (Exception e) {
-      refCount = -1;
-      log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
-    }
-
-    // remove metadata table entries
-    try {
-      // Intentionally do not pass the master lock. If the master loses its lock, this operation may complete before the master can kill itself.
-      // If the master lock were passed to deleteTable, it is possible that the delete mutations would be dropped. If the delete mutations
-      // are dropped and the operation completes, then the deletes will not be repeated.
-      MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
-    } catch (Exception e) {
-      log.error("error deleting " + tableId + " from metadata table", e);
-    }
-
-    // remove any problem reports the table may have
-    try {
-      ProblemReports.getInstance(master).deleteProblemReports(tableId);
-    } catch (Exception e) {
-      log.error("Failed to delete problem reports for table " + tableId, e);
-    }
-
-    if (refCount == 0) {
-      final AccumuloConfiguration conf = master.getConfiguration();
-      boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
-
-      // delete the map files
-      try {
-        VolumeManager fs = master.getFileSystem();
-        for (String dir : ServerConstants.getTablesDirs()) {
-          if (archiveFiles) {
-            archiveFile(fs, dir, tableId);
-          } else {
-            fs.deleteRecursively(new Path(dir, tableId));
-          }
-        }
-      } catch (IOException e) {
-        log.error("Unable to remove deleted table directory", e);
-      } catch (IllegalArgumentException exception) {
-        if (exception.getCause() instanceof UnknownHostException) {
-          /* Thrown if HDFS encounters a DNS problem in some edge cases */
-          log.error("Unable to remove deleted table directory", exception);
-        } else {
-          throw exception;
-        }
-      }
-    }
-
-    // remove table from zookeeper
-    try {
-      TableManager.getInstance().removeTable(tableId);
-      Tables.clearCache(master.getInstance());
-    } catch (Exception e) {
-      log.error("Failed to find table id in zookeeper", e);
-    }
-
-    // remove any permissions associated with this table
-    try {
-      AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
-    } catch (ThriftSecurityException e) {
-      log.error("{}", e.getMessage(), e);
-    }
-
-    Utils.unreserveTable(tableId, tid, true);
-    Utils.unreserveNamespace(namespaceId, tid, false);
-
-    LoggerFactory.getLogger(CleanUp.class).debug("Deleted table " + tableId);
-
-    return null;
-  }
-
-  protected void archiveFile(VolumeManager fs, String dir, String tableId) throws IOException {
-    Path tableDirectory = new Path(dir, tableId);
-    Volume v = fs.getVolumeByPath(tableDirectory);
-    String basePath = v.getBasePath();
-
-    // Path component of URI
-    String tableDirPath = tableDirectory.toUri().getPath();
-
-    // Just the suffix of the path (after the Volume's base path)
-    String tableDirSuffix = tableDirPath.substring(basePath.length());
-
-    // Remove a leading path separator char because Path will treat the "child" as an absolute path with it
-    if (Path.SEPARATOR_CHAR == tableDirSuffix.charAt(0)) {
-      if (tableDirSuffix.length() > 1) {
-        tableDirSuffix = tableDirSuffix.substring(1);
-      } else {
-        tableDirSuffix = "";
-      }
-    }
-
-    // Get the file archive directory on this volume
-    final Path fileArchiveDir = new Path(basePath, ServerConstants.FILE_ARCHIVE_DIR);
-
-    // Make sure it exists just to be safe
-    fs.mkdirs(fileArchiveDir);
-
-    // The destination to archive this table to
-    final Path destTableDir = new Path(fileArchiveDir, tableDirSuffix);
-
-    log.debug("Archiving " + tableDirectory + " to " + tableDirectory);
-
-    if (fs.exists(destTableDir)) {
-      merge(fs, tableDirectory, destTableDir);
-    } else {
-      fs.rename(tableDirectory, destTableDir);
-    }
-  }
-
-  protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
-    for (FileStatus child : fs.listStatus(src)) {
-      final String childName = child.getPath().getName();
-      final Path childInSrc = new Path(src, childName), childInDest = new Path(dest, childName);
-
-      if (child.isFile()) {
-        if (fs.exists(childInDest)) {
-          log.warn("File already exists in archive, ignoring. " + childInDest);
-        } else {
-          fs.rename(childInSrc, childInDest);
-        }
-      } else if (child.isDirectory()) {
-        if (fs.exists(childInDest)) {
-          // Recurse
-          merge(fs, childInSrc, childInDest);
-        } else {
-          fs.rename(childInSrc, childInDest);
-        }
-      } else {
-        // Symlinks shouldn't exist in table directories.
-        log.warn("Ignoring archiving of non-file/directory: " + child);
-      }
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    // nothing to do
-  }
-
-}
 
 public class DeleteTable extends MasterRepo {
 

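The refCount computed in CleanUp.call() above works by grepping every tablet's
file entries except the deleted table's own slice of the metadata table, and
the exclusion is done purely with ranges. Reduced to its shape (a minimal
sketch reusing the names from the code above, omitting the grep iterator and
column-family setup):

    // Sketch only: scan the whole tablets section except one table's slice.
    Range allTables = MetadataSchema.TabletsSection.getRange();
    Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
    Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false); // strictly before the table
    Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);      // strictly after it
    bs.setRanges(Arrays.asList(beforeTable, afterTable));

A match in either range means some other table (e.g., a clone) still
references this table's files, which disables the wholesale directory removal
that runs below when refCount == 0.
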
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java
new file mode 100644
index 0000000..d8f276a
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+
+class ExportInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  public String tableName;
+  public String tableID;
+  public String exportDir;
+  public String namespaceID;
+}
\ No newline at end of file

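ExportInfo, like NamespaceInfo and ImportedTableInfo, is a bare Serializable
holder because FATE persists each repo, along with the state it carries,
between steps, so an in-flight table operation can survive a master restart.
A minimal round-trip sketch under that assumption (the values are
hypothetical; the real persistence is handled by FATE itself):

    // Sketch only; java.io.* imports assumed.
    ExportInfo info = new ExportInfo();
    info.tableName = "ns.t1"; // hypothetical
    info.tableID = "42";      // hypothetical
    info.exportDir = "/export/t1";

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
      out.writeObject(info); // the bytes FATE stores between steps
    }
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
      ExportInfo restored = (ExportInfo) in.readObject();
    }

This is also why every class here pins serialVersionUID = 1L: the serialized
form must stay readable across restarts.
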
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
index e5b7e86..cd50a18 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
@@ -16,268 +16,11 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedOutputStream;
-import java.io.BufferedWriter;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipOutputStream;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-
-class ExportInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  public String tableName;
-  public String tableID;
-  public String exportDir;
-  public String namespaceID;
-}
-
-class WriteExportFiles extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private final ExportInfo tableInfo;
-
-  WriteExportFiles(ExportInfo tableInfo) {
-    this.tableInfo = tableInfo;
-  }
-
-  private void checkOffline(Connector conn) throws Exception {
-    if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
-      Tables.clearCache(conn.getInstance());
-      if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
-        throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-            "Table is not offline");
-      }
-    }
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-
-    long reserved = Utils.reserveNamespace(tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT)
-        + Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
-    if (reserved > 0)
-      return reserved;
-
-    Connector conn = master.getConnector();
-
-    checkOffline(conn);
-
-    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
-
-    // scan for locations
-    metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
-    metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
-
-    if (metaScanner.iterator().hasNext()) {
-      return 500;
-    }
-
-    // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
-    // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
-    metaScanner.clearColumns();
-    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
-
-    if (metaScanner.iterator().hasNext()) {
-      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-          "Write ahead logs found for table");
-    }
-
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    try {
-      exportTable(master.getFileSystem(), master, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
-    } catch (IOException ioe) {
-      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-          "Failed to create export files " + ioe.getMessage());
-    }
-    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
-  }
-
-  public static void exportTable(VolumeManager fs, AccumuloServerContext context, String tableName, String tableID, String exportDir) throws Exception {
-
-    fs.mkdirs(new Path(exportDir));
-    Path exportMetaFilePath = fs.getVolumeByPath(new Path(exportDir)).getFileSystem().makeQualified(new Path(exportDir, Constants.EXPORT_FILE));
-
-    FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false);
-    ZipOutputStream zipOut = new ZipOutputStream(fileOut);
-    BufferedOutputStream bufOut = new BufferedOutputStream(zipOut);
-    DataOutputStream dataOut = new DataOutputStream(bufOut);
-
-    try {
-
-      zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
-      OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
-      osw.append(ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n");
-      osw.append("srcInstanceName:" + context.getInstance().getInstanceName() + "\n");
-      osw.append("srcInstanceID:" + context.getInstance().getInstanceID() + "\n");
-      osw.append("srcZookeepers:" + context.getInstance().getZooKeepers() + "\n");
-      osw.append("srcTableName:" + tableName + "\n");
-      osw.append("srcTableID:" + tableID + "\n");
-      osw.append(ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n");
-      osw.append("srcCodeVersion:" + Constants.VERSION + "\n");
-
-      osw.flush();
-      dataOut.flush();
-
-      exportConfig(context, tableID, zipOut, dataOut);
-      dataOut.flush();
-
-      Map<String,String> uniqueFiles = exportMetadata(fs, context, tableID, zipOut, dataOut);
-
-      dataOut.close();
-      dataOut = null;
-
-      createDistcpFile(fs, exportDir, exportMetaFilePath, uniqueFiles);
-
-    } finally {
-      if (dataOut != null)
-        dataOut.close();
-    }
-  }
-
-  private static void createDistcpFile(VolumeManager fs, String exportDir, Path exportMetaFilePath, Map<String,String> uniqueFiles) throws IOException {
-    BufferedWriter distcpOut = new BufferedWriter(new OutputStreamWriter(fs.create(new Path(exportDir, "distcp.txt"), false), UTF_8));
-
-    try {
-      for (String file : uniqueFiles.values()) {
-        distcpOut.append(file);
-        distcpOut.newLine();
-      }
-
-      distcpOut.append(exportMetaFilePath.toString());
-      distcpOut.newLine();
-
-      distcpOut.close();
-      distcpOut = null;
-
-    } finally {
-      if (distcpOut != null)
-        distcpOut.close();
-    }
-  }
-
-  private static Map<String,String> exportMetadata(VolumeManager fs, AccumuloServerContext context, String tableID, ZipOutputStream zipOut,
-      DataOutputStream dataOut) throws IOException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
-    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
-
-    Map<String,String> uniqueFiles = new HashMap<String,String>();
-
-    Scanner metaScanner = context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
-    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
-    metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
-
-    for (Entry<Key,Value> entry : metaScanner) {
-      entry.getKey().write(dataOut);
-      entry.getValue().write(dataOut);
-
-      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-        String path = fs.getFullPath(entry.getKey()).toString();
-        String tokens[] = path.split("/");
-        if (tokens.length < 1) {
-          throw new RuntimeException("Illegal path " + path);
-        }
-
-        String filename = tokens[tokens.length - 1];
-
-        String existingPath = uniqueFiles.get(filename);
-        if (existingPath == null) {
-          uniqueFiles.put(filename, path);
-        } else if (!existingPath.equals(path)) {
-          // make sure file names are unique, should only apply for tables with file names generated by Accumulo 1.3 and earlier
-          throw new IOException("Cannot export table with nonunique file names " + filename + ". Major compact table.");
-        }
-
-      }
-    }
-    return uniqueFiles;
-  }
-
-  private static void exportConfig(AccumuloServerContext context, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
-      AccumuloSecurityException, TableNotFoundException, IOException {
-    Connector conn = context.getConnector();
-
-    DefaultConfiguration defaultConfig = AccumuloConfiguration.getDefaultConfiguration();
-    Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
-    Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
-
-    TableConfiguration tableConfig = context.getServerConfigurationFactory().getTableConfiguration(tableID);
-
-    OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
-
-    // only put props that differ from the defaults and from higher-level configurations
-    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
-    for (Entry<String,String> prop : tableConfig) {
-      if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
-        Property key = Property.getPropertyByKey(prop.getKey());
-
-        if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
-          if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
-            osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
-          }
-        }
-      }
-    }
-
-    osw.flush();
-  }
-}
 
 public class ExportTable extends MasterRepo {
   private static final long serialVersionUID = 1L;

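The stream layering in WriteExportFiles.exportTable() above is easy to
misread: the ZipOutputStream sits closest to the file, with the buffered and
data streams stacked on top of it. Entry boundaries are therefore declared on
zipOut, while payload bytes travel through dataOut, which must be flushed
before the next entry is opened or buffered bytes would land in the wrong
entry. A minimal sketch of the same stacking (names reuse the code above):

    // Sketch only: one zip entry written through the stacked streams.
    FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false);
    ZipOutputStream zipOut = new ZipOutputStream(fileOut);
    DataOutputStream dataOut = new DataOutputStream(new BufferedOutputStream(zipOut));

    zipOut.putNextEntry(new ZipEntry("example")); // boundary: goes to zipOut
    dataOut.writeUTF("payload");                  // payload: goes through dataOut
    dataOut.flush();                              // drain the buffer before the next entry
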
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
new file mode 100644
index 0000000..a502a3d
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+
+class FinishCancelCompaction extends MasterRepo {
+  private static final long serialVersionUID = 1L;
+  private String tableId;
+
+  public FinishCancelCompaction(String tableId) {
+    this.tableId = tableId;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    Utils.getReadLock(tableId, tid).unlock();
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+
+  }
+}
\ No newline at end of file

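FinishCancelCompaction is the smallest example of the contract shared by all
of these tableOps classes: isReady() returns a back-off in milliseconds (0
means run now; the 50 ms returns in CleanUp above are retries), call()
performs one step and returns the next Repo in the chain (null when the
operation is finished), and undo() compensates if a later step fails. A
hypothetical minimal step, assuming only what is visible in this patch:

    // Sketch only: the shape of a single FATE step.
    class ExampleStep extends MasterRepo {
      private static final long serialVersionUID = 1L;

      @Override
      public long isReady(long tid, Master env) throws Exception {
        return 0; // positive values ask FATE to retry after that many ms
      }

      @Override
      public Repo<Master> call(long tid, Master env) throws Exception {
        // ... one idempotent unit of work ...
        return null; // or the next step, as ChooseDir does with new CreateDir(tableInfo)
      }

      @Override
      public void undo(long tid, Master env) throws Exception {
        // roll back this step's work if a later step fails
      }
    }
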
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
new file mode 100644
index 0000000..7c3701b
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.slf4j.LoggerFactory;
+
+class FinishCloneTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private CloneInfo cloneInfo;
+
+  public FinishCloneTable(CloneInfo cloneInfo) {
+    this.cloneInfo = cloneInfo;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    // Directories are intentionally not created here: clone directories must be unique because they
+    // occupy a different namespace than normal tablet directories, and some clones may never create
+    // files, so there is no need to consume namenode space with directories that go unused.
+    // Tablets will create their directories as needed.
+
+    TableManager.getInstance().transitionTableState(cloneInfo.tableId, TableState.ONLINE);
+
+    Utils.unreserveNamespace(cloneInfo.srcNamespaceId, tid, false);
+    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
+      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
+    Utils.unreserveTable(cloneInfo.srcTableId, tid, false);
+    Utils.unreserveTable(cloneInfo.tableId, tid, true);
+
+    environment.getEventCoordinator().event("Cloned table %s from %s", cloneInfo.tableName, cloneInfo.srcTableId);
+
+    LoggerFactory.getLogger(FinishCloneTable.class).debug("Cloned table " + cloneInfo.srcTableId + " " + cloneInfo.tableId + " " + cloneInfo.tableName);
+
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {}
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
new file mode 100644
index 0000000..93cc194
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.slf4j.LoggerFactory;
+
+class FinishCreateNamespace extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private NamespaceInfo namespaceInfo;
+
+  public FinishCreateNamespace(NamespaceInfo ti) {
+    this.namespaceInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long id, Master env) throws Exception {
+
+    Utils.unreserveNamespace(namespaceInfo.namespaceId, id, true);
+
+    env.getEventCoordinator().event("Created namespace %s ", namespaceInfo.namespaceName);
+
+    LoggerFactory.getLogger(FinishCreateNamespace.class).debug("Created namespace " + namespaceInfo.namespaceId + " " + namespaceInfo.namespaceName);
+
+    return null;
+  }
+
+  @Override
+  public String getReturn() {
+    return namespaceInfo.namespaceId;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {}
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
new file mode 100644
index 0000000..2343efb
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.slf4j.LoggerFactory;
+
+class FinishCreateTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  public FinishCreateTable(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
+
+    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+
+    env.getEventCoordinator().event("Created table %s ", tableInfo.tableName);
+
+    LoggerFactory.getLogger(FinishCreateTable.class).debug("Created table " + tableInfo.tableId + " " + tableInfo.tableName);
+
+    return null;
+  }
+
+  @Override
+  public String getReturn() {
+    return tableInfo.tableId;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {}
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
new file mode 100644
index 0000000..7dd76b1
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.LoggerFactory;
+
+class FinishImportTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  public FinishImportTable(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+
+    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));
+
+    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
+
+    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+
+    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+
+    env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
+
+    LoggerFactory.getLogger(FinishImportTable.class).debug("Imported table " + tableInfo.tableId + " " + tableInfo.tableName);
+
+    return null;
+  }
+
+  @Override
+  public String getReturn() {
+    return tableInfo.tableId;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {}
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
new file mode 100644
index 0000000..f436fd3
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.client.impl.TableOperationsImpl;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.TablePropUtil;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+class ImportPopulateZookeeper extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  ImportPopulateZookeeper(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.IMPORT);
+  }
+
+  private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
+
+    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
+
+    try {
+      FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
+      return TableOperationsImpl.getExportedProps(ns, path);
+    } catch (IOException ioe) {
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error reading table props from " + path + " " + ioe.getMessage());
+    }
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // reserve the table name in zookeeper or fail
+
+    Utils.tableNameLock.lock();
+    try {
+      // write tableName & tableId to zookeeper
+      Instance instance = env.getInstance();
+
+      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
+
+      String namespace = Tables.qualify(tableInfo.tableName).getFirst();
+      String namespaceId = Namespaces.getNamespaceId(instance, namespace);
+      TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
+
+      Tables.clearCache(instance);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+
+    for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet())
+      if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
+        throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+            "Invalid table property " + entry.getKey());
+      }
+
+    return new CreateImportDir(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    Instance instance = env.getInstance();
+    TableManager.getInstance().removeTable(tableInfo.tableId);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    Tables.clearCache(instance);
+  }
+}
\ No newline at end of file

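The zookeeper registration in ImportPopulateZookeeper.call() above is a
classic check-then-act sequence: the existence check and addTable() must run
under the same global lock, or two concurrent creates could both pass the
check and claim the same name. Reduced to its shape (a sketch reusing the
names from the code above):

    // Sketch only: check-then-register under one global lock.
    Utils.tableNameLock.lock();
    try {
      // Throws if the name is already taken.
      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
      TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
      Tables.clearCache(instance); // make the new mapping visible to readers
    } finally {
      Utils.tableNameLock.unlock();
    }
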
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
new file mode 100644
index 0000000..00fade9
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.security.SecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class ImportSetupPermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  public ImportSetupPermissions(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // give all table permissions to the creator
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    for (TablePermission permission : TablePermission.values()) {
+      try {
+        security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
+      } catch (ThriftSecurityException e) {
+        LoggerFactory.getLogger(ImportSetupPermissions.class).error("{}", e.getMessage(), e);
+        throw e;
+      }
+    }
+
+    // setup permissions in zookeeper before table info in zookeeper
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    return new ImportPopulateZookeeper(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index 31bc52c..a90474f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@ -18,542 +18,21 @@ package org.apache.accumulo.master.tableOps;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
-import java.io.BufferedInputStream;
 import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Namespaces;
-import org.apache.accumulo.core.client.impl.TableOperationsImpl;
-import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.tablets.UniqueNameAllocator;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.util.TablePropUtil;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-
-/**
- *
- */
-class ImportedTableInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  public String exportDir;
-  public String user;
-  public String tableName;
-  public String tableId;
-  public String importDir;
-  public String namespaceId;
-}
-
-class FinishImportTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  public FinishImportTable(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-
-    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));
-
-    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
-
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-
-    env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
-
-    LoggerFactory.getLogger(FinishImportTable.class).debug("Imported table " + tableInfo.tableId + " " + tableInfo.tableName);
-
-    return null;
-  }
-
-  @Override
-  public String getReturn() {
-    return tableInfo.tableId;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {}
-
-}
-
-class MoveExportedFiles extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  MoveExportedFiles(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    try {
-      VolumeManager fs = master.getFileSystem();
-
-      Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
-
-      for (String oldFileName : fileNameMappings.keySet()) {
-        if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
-          throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-              "File referenced by exported table does not exists " + oldFileName);
-        }
-      }
-
-      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
-
-      for (FileStatus fileStatus : files) {
-        String newName = fileNameMappings.get(fileStatus.getPath().getName());
-
-        if (newName != null)
-          fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
-      }
-
-      return new FinishImportTable(tableInfo);
-    } catch (IOException ioe) {
-      log.warn("{}", ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error renaming files " + ioe.getMessage());
-    }
-  }
-}
-
-class PopulateMetadataTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  PopulateMetadataTable(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  static Map<String,String> readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo) throws Exception {
-    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(tableInfo.importDir, "mappings.txt")), UTF_8));
-
-    try {
-      Map<String,String> map = new HashMap<String,String>();
-
-      String line = null;
-      while ((line = in.readLine()) != null) {
-        String sa[] = line.split(":", 2);
-        map.put(sa[0], sa[1]);
-      }
-
-      return map;
-    } finally {
-      in.close();
-    }
-
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
-
-    BatchWriter mbw = null;
-    ZipInputStream zis = null;
-
-    try {
-      VolumeManager fs = master.getFileSystem();
-
-      mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
-      zis = new ZipInputStream(fs.open(path));
-
-      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
-
-      log.info("importDir is " + tableInfo.importDir);
-
-      // This is a directory already prefixed with proper volume information e.g. hdfs://localhost:8020/path/to/accumulo/tables/...
-      final String bulkDir = tableInfo.importDir;
-
-      final String[] tableDirs = ServerConstants.getTablesDirs();
-
-      ZipEntry zipEntry;
-      while ((zipEntry = zis.getNextEntry()) != null) {
-        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
-          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
-
-          Key key = new Key();
-          Value val = new Value();
-
-          Mutation m = null;
-          Text currentRow = null;
-          int dirCount = 0;
-
-          while (true) {
-            key.readFields(in);
-            val.readFields(in);
-
-            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
-            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
-
-            Text cq;
-
-            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-              String oldName = new Path(key.getColumnQualifier().toString()).getName();
-              String newName = fileNameMappings.get(oldName);
-
-              if (newName == null) {
-                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-                    "File " + oldName + " does not exist in import dir");
-              }
-
-              cq = new Text(bulkDir + "/" + newName);
-            } else {
-              cq = key.getColumnQualifier();
-            }
-
-            if (m == null) {
-              // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
-              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
-
-              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
-              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
-
-              m = new Mutation(metadataRow);
-              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
-              currentRow = metadataRow;
-            }
-
-            if (!currentRow.equals(metadataRow)) {
-              mbw.addMutation(m);
-
-              // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
-              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
-
-              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
-              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
-
-              m = new Mutation(metadataRow);
-              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
-            }
-
-            m.put(key.getColumnFamily(), cq, val);
-
-            if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
-              mbw.addMutation(m);
-              break; // it's the last column in the last row
-            }
-          }
-
-          break;
-        }
-      }
-
-      return new MoveExportedFiles(tableInfo);
-    } catch (IOException ioe) {
-      log.warn("{}", ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error reading " + path + " " + ioe.getMessage());
-    } finally {
-      if (zis != null) {
-        try {
-          zis.close();
-        } catch (IOException ioe) {
-          log.warn("Failed to close zip file ", ioe);
-        }
-      }
-
-      if (mbw != null) {
-        mbw.close();
-      }
-    }
-  }
-
-  /**
-   * Given options for tables (across multiple volumes), construct an absolute path using the unique name within the chosen volume
-   *
-   * @return An absolute, unique path for the imported table
-   */
-  protected String getClonedTabletDir(Master master, String[] tableDirs, String tabletDir) {
-    // We can try to spread out the tablet dirs across all volumes
-    String tableDir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), tableDirs);
-
-    // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
-    return tableDir + "/" + tableInfo.tableId + "/" + tabletDir;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
-  }
-}
-
-class MapImportFileNames extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  MapImportFileNames(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-
-    Path path = new Path(tableInfo.importDir, "mappings.txt");
-
-    BufferedWriter mappingsWriter = null;
-
-    try {
-      VolumeManager fs = environment.getFileSystem();
-
-      fs.mkdirs(new Path(tableInfo.importDir));
-
-      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
-
-      UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
-
-      mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
-
-      for (FileStatus fileStatus : files) {
-        String fileName = fileStatus.getPath().getName();
-        log.info("filename " + fileStatus.getPath().toString());
-        String[] sa = fileName.split("\\.");
-        String extension = "";
-        if (sa.length > 1) {
-          extension = sa[sa.length - 1];
-
-          if (!FileOperations.getValidExtensions().contains(extension)) {
-            continue;
-          }
-        } else {
-          // assume it is a map file
-          extension = Constants.MAPFILE_EXTENSION;
-        }
-
-        String newName = "I" + namer.getNextName() + "." + extension;
-
-        mappingsWriter.append(fileName);
-        mappingsWriter.append(':');
-        mappingsWriter.append(newName);
-        mappingsWriter.newLine();
-      }
-
-      mappingsWriter.close();
-      mappingsWriter = null;
-
-      return new PopulateMetadataTable(tableInfo);
-    } catch (IOException ioe) {
-      log.warn("{}", ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error writing mapping file " + path + " " + ioe.getMessage());
-    } finally {
-      if (mappingsWriter != null)
-        try {
-          mappingsWriter.close();
-        } catch (IOException ioe) {
-          log.warn("Failed to close " + path, ioe);
-        }
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir));
-  }
-}
-
-class CreateImportDir extends MasterRepo {
-  private static final Logger log = LoggerFactory.getLogger(CreateImportDir.class);
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  CreateImportDir(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
-
-    Path exportDir = new Path(tableInfo.exportDir);
-    String[] tableDirs = ServerConstants.getTablesDirs();
-
-    log.info("Looking for matching filesystem for " + exportDir + " from options " + Arrays.toString(tableDirs));
-    Path base = master.getFileSystem().matchingFileSystem(exportDir, tableDirs);
-    log.info("Chose base table directory of " + base);
-    Path directory = new Path(base, tableInfo.tableId);
-
-    Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
-
-    tableInfo.importDir = newBulkDir.toString();
-
-    log.info("Using import dir: " + tableInfo.importDir);
-
-    return new MapImportFileNames(tableInfo);
-  }
-}
-
-class ImportPopulateZookeeper extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  ImportPopulateZookeeper(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.IMPORT);
-  }
-
-  private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
-
-    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
-
-    try {
-      FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
-      return TableOperationsImpl.getExportedProps(ns, path);
-    } catch (IOException ioe) {
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error reading table props from " + path + " " + ioe.getMessage());
-    }
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // reserve the table name in zookeeper or fail
-
-    Utils.tableNameLock.lock();
-    try {
-      // write tableName & tableId to zookeeper
-      Instance instance = env.getInstance();
-
-      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
-
-      String namespace = Tables.qualify(tableInfo.tableName).getFirst();
-      String namespaceId = Namespaces.getNamespaceId(instance, namespace);
-      TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
-
-      Tables.clearCache(instance);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-
-    for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet())
-      if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
-        throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-            "Invalid table property " + entry.getKey());
-      }
-
-    return new CreateImportDir(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Instance instance = env.getInstance();
-    TableManager.getInstance().removeTable(tableInfo.tableId);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-    Tables.clearCache(instance);
-  }
-}
-
-class ImportSetupPermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  public ImportSetupPermissions(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
-    for (TablePermission permission : TablePermission.values()) {
-      try {
-        security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
-      } catch (ThriftSecurityException e) {
-        LoggerFactory.getLogger(ImportSetupPermissions.class).error("{}", e.getMessage(), e);
-        throw e;
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    return new ImportPopulateZookeeper(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
-  }
-}
 
 public class ImportTable extends MasterRepo {
   private static final long serialVersionUID = 1L;

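Each class above is one step of a FATE (fault-tolerant executor) operation: isReady() returns 0 when the step may run (or a back-off in milliseconds), call() does one idempotent unit of work and returns the next step, and undo() reverses it if a later step fails. For import the chain runs ImportSetupPermissions, ImportPopulateZookeeper, CreateImportDir, MapImportFileNames, PopulateMetadataTable, MoveExportedFiles, then FinishImportTable. A minimal sketch of that contract, with an illustrative Step interface standing in for org.apache.accumulo.fate.Repo<Master> (the driver loop is a simplification; the real framework also persists each step and runs undo() in reverse order on failure):

    // Illustrative stand-in for org.apache.accumulo.fate.Repo<Master>; names are not the real API.
    interface Step {
      long isReady(long tid) throws Exception; // 0 = run now, otherwise ms to wait before retrying

      Step call(long tid) throws Exception;    // one idempotent unit of work; returns the next step

      void undo(long tid) throws Exception;    // roll this step back if a later one fails
    }

    class Driver {
      static void run(Step first, long tid) throws Exception {
        Step current = first;
        while (current != null) { // a null return, as in NamespaceCleanUp.call(), ends the chain
          long wait;
          while ((wait = current.isReady(tid)) > 0)
            Thread.sleep(wait);
          current = current.call(tid);
        }
      }
    }
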
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java
new file mode 100644
index 0000000..34bb6c8
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+
+class ImportedTableInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  public String exportDir;
+  public String user;
+  public String tableName;
+  public String tableId;
+  public String importDir;
+  public String namespaceId;
+}
\ No newline at end of file

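ImportedTableInfo is deliberately a bare Serializable holder: FATE persists each step together with state like this between executions, which is why the serialVersionUID matters if a master restarts mid-operation on newer code. A quick round-trip sketch using standard Java serialization (field values are invented, and since the class is package-private this would have to live in the same package):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    public class ImportedTableInfoRoundTrip {
      public static void main(String[] args) throws Exception {
        ImportedTableInfo info = new ImportedTableInfo();
        info.tableName = "ns.imported"; // illustrative values only
        info.tableId = "42";
        info.exportDir = "/export/dir";

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
          out.writeObject(info); // roughly what FATE stores between steps
        }
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
          ImportedTableInfo copy = (ImportedTableInfo) in.readObject();
          System.out.println(copy.tableName + " " + copy.tableId); // prints: ns.imported 42
        }
      }
    }
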
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java
new file mode 100644
index 0000000..c478a5d
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedWriter;
+import java.io.OutputStreamWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.accumulo.core.client.impl.ServerClient;
+import org.apache.accumulo.core.client.impl.thrift.ClientService;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.client.impl.thrift.ClientService.Client;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.SimpleThreadPool;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.htrace.wrappers.TraceExecutorService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class LoadFiles extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private static ExecutorService threadPool = null;
+  private static final Logger log = LoggerFactory.getLogger(BulkImport.class);
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String errorDir;
+  private boolean setTime;
+
+  public LoadFiles(String tableId, String source, String bulk, String errorDir, boolean setTime) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.errorDir = errorDir;
+    this.setTime = setTime;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    if (master.onlineTabletServers().size() == 0)
+      return 500;
+    return 0;
+  }
+
+  private static synchronized ExecutorService getThreadPool(Master master) {
+    if (threadPool == null) {
+      int threadPoolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
+      ThreadPoolExecutor pool = new SimpleThreadPool(threadPoolSize, "bulk import");
+      pool.allowCoreThreadTimeOut(true);
+      threadPool = new TraceExecutorService(pool);
+    }
+    return threadPool;
+  }
+
+  @Override
+  public Repo<Master> call(final long tid, final Master master) throws Exception {
+    ExecutorService executor = getThreadPool(master);
+    final AccumuloConfiguration conf = master.getConfiguration();
+    VolumeManager fs = master.getFileSystem();
+    List<FileStatus> files = new ArrayList<FileStatus>();
+    for (FileStatus entry : fs.listStatus(new Path(bulk))) {
+      files.add(entry);
+    }
+    log.debug("tid " + tid + " importing " + files.size() + " files");
+
+    Path writable = new Path(this.errorDir, ".iswritable");
+    if (!fs.createNewFile(writable)) {
+      // Maybe this is a re-try... clear the flag and try again
+      fs.delete(writable);
+      if (!fs.createNewFile(writable))
+        throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
+            "Unable to write to " + this.errorDir);
+    }
+    fs.delete(writable);
+
+    final Set<String> filesToLoad = Collections.synchronizedSet(new HashSet<String>());
+    for (FileStatus f : files)
+      filesToLoad.add(f.getPath().toString());
+
+    final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
+    for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
+      List<Future<List<String>>> results = new ArrayList<Future<List<String>>>();
+
+      if (master.onlineTabletServers().size() == 0)
+        log.warn("There are no tablet server to process bulk import, waiting (tid = " + tid + ")");
+
+      while (master.onlineTabletServers().size() == 0) {
+        UtilWaitThread.sleep(500);
+      }
+
+      // Use the threadpool to assign files one-at-a-time to the server
+      final List<String> loaded = Collections.synchronizedList(new ArrayList<String>());
+      for (final String file : filesToLoad) {
+        results.add(executor.submit(new Callable<List<String>>() {
+          @Override
+          public List<String> call() {
+            List<String> failures = new ArrayList<String>();
+            ClientService.Client client = null;
+            String server = null;
+            try {
+              // get a connection to a random tablet server, do not prefer cached connections because
+              // this is running on the master and there are lots of connections to tablet servers
+              // serving the metadata tablets
+              long timeInMillis = master.getConfiguration().getTimeInMillis(Property.MASTER_BULK_TIMEOUT);
+              Pair<String,Client> pair = ServerClient.getConnection(master, false, timeInMillis);
+              client = pair.getSecond();
+              server = pair.getFirst();
+              List<String> attempt = Collections.singletonList(file);
+              log.debug("Asking " + pair.getFirst() + " to bulk import " + file);
+              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId, attempt, errorDir, setTime);
+              if (fail.isEmpty()) {
+                loaded.add(file);
+              } else {
+                failures.addAll(fail);
+              }
+            } catch (Exception ex) {
+              log.error("rpc failed server:" + server + ", tid:" + tid + " " + ex);
+            } finally {
+              ServerClient.close(client);
+            }
+            return failures;
+          }
+        }));
+      }
+      Set<String> failures = new HashSet<String>();
+      for (Future<List<String>> f : results)
+        failures.addAll(f.get());
+      filesToLoad.removeAll(loaded);
+      if (filesToLoad.size() > 0) {
+        log.debug("tid " + tid + " attempt " + (attempt + 1) + " " + sampleList(filesToLoad, 10) + " failed");
+        UtilWaitThread.sleep(100);
+      }
+    }
+
+    FSDataOutputStream failFile = fs.create(new Path(errorDir, BulkImport.FAILURES_TXT), true);
+    BufferedWriter out = new BufferedWriter(new OutputStreamWriter(failFile, UTF_8));
+    try {
+      for (String f : filesToLoad) {
+        out.write(f);
+        out.write("\n");
+      }
+    } finally {
+      out.close();
+    }
+
+    // return the next step, which will perform cleanup
+    return new CompleteBulkImport(tableId, source, bulk, errorDir);
+  }
+
+  static String sampleList(Collection<?> potentiallyLongList, int max) {
+    StringBuffer result = new StringBuffer();
+    result.append("[");
+    int i = 0;
+    for (Object obj : potentiallyLongList) {
+      result.append(obj);
+      if (i >= max) {
+        result.append("...");
+        break;
+      } else {
+        result.append(", ");
+      }
+      i++;
+    }
+    if (i < max)
+      result.delete(result.length() - 2, result.length());
+    result.append("]");
+    return result.toString();
+  }
+
+}
\ No newline at end of file

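sampleList() above exists only to keep log lines bounded when many files repeatedly fail to load: it prints elements until the cap is reached, then cuts off with "...". A small demo of the output shape (sampleList is package-private static, so assume the same package):

    import java.util.Arrays;

    public class SampleListDemo {
      public static void main(String[] args) {
        // short lists print in full
        System.out.println(LoadFiles.sampleList(Arrays.asList("a", "b"), 10));
        // prints: [a, b]

        // longer lists are truncated once the cap is hit
        System.out.println(LoadFiles.sampleList(Arrays.asList("f1", "f2", "f3", "f4"), 3));
        // prints: [f1, f2, f3, f4...]
      }
    }
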
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
new file mode 100644
index 0000000..0ee91dd
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.tablets.UniqueNameAllocator;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+class MapImportFileNames extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  MapImportFileNames(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+
+    Path path = new Path(tableInfo.importDir, "mappings.txt");
+
+    BufferedWriter mappingsWriter = null;
+
+    try {
+      VolumeManager fs = environment.getFileSystem();
+
+      fs.mkdirs(new Path(tableInfo.importDir));
+
+      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
+
+      UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+
+      mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
+
+      for (FileStatus fileStatus : files) {
+        String fileName = fileStatus.getPath().getName();
+        log.info("filename " + fileStatus.getPath().toString());
+        String[] sa = fileName.split("\\.");
+        String extension = "";
+        if (sa.length > 1) {
+          extension = sa[sa.length - 1];
+
+          if (!FileOperations.getValidExtensions().contains(extension)) {
+            continue;
+          }
+        } else {
+          // assume it is a map file
+          extension = Constants.MAPFILE_EXTENSION;
+        }
+
+        String newName = "I" + namer.getNextName() + "." + extension;
+
+        mappingsWriter.append(fileName);
+        mappingsWriter.append(':');
+        mappingsWriter.append(newName);
+        mappingsWriter.newLine();
+      }
+
+      mappingsWriter.close();
+      mappingsWriter = null;
+
+      return new PopulateMetadataTable(tableInfo);
+    } catch (IOException ioe) {
+      log.warn("{}", ioe.getMessage(), ioe);
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error writing mapping file " + path + " " + ioe.getMessage());
+    } finally {
+      if (mappingsWriter != null)
+        try {
+          mappingsWriter.close();
+        } catch (IOException ioe) {
+          log.warn("Failed to close " + path, ioe);
+        }
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir));
+  }
+}
\ No newline at end of file

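Each line of the mappings.txt written above is an oldName:newName pair, e.g. F00000a1.rf:I0000f22.rf (names invented here), consumed later by MoveExportedFiles (below) and PopulateMetadataTable. readMappingFile itself is not in this part of the commit; a plausible reader, assuming exactly that colon-separated format, is sketched here as an illustration, not the committed code:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MappingReaderSketch {
      // Hypothetical counterpart to the writer in MapImportFileNames: one "old:new" pair per line.
      static Map<String,String> readMappings(FileSystem fs, Path mappingsFile) throws IOException {
        Map<String,String> mappings = new HashMap<>();
        try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(mappingsFile), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            int sep = line.lastIndexOf(':');
            mappings.put(line.substring(0, sep), line.substring(sep + 1));
          }
        }
        return mappings;
      }
    }
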
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
new file mode 100644
index 0000000..19395df
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+class MoveExportedFiles extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  MoveExportedFiles(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    try {
+      VolumeManager fs = master.getFileSystem();
+
+      Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
+
+      for (String oldFileName : fileNameMappings.keySet()) {
+        if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
+          throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+              "File referenced by exported table does not exists " + oldFileName);
+        }
+      }
+
+      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
+
+      for (FileStatus fileStatus : files) {
+        String newName = fileNameMappings.get(fileStatus.getPath().getName());
+
+        if (newName != null)
+          fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
+      }
+
+      return new FinishImportTable(tableInfo);
+    } catch (IOException ioe) {
+      log.warn("{}", ioe.getMessage(), ioe);
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error renaming files " + ioe.getMessage());
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
new file mode 100644
index 0000000..2444374
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.tables.TableManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class NamespaceCleanUp extends MasterRepo {
+
+  private static final Logger log = LoggerFactory.getLogger(NamespaceCleanUp.class);
+
+  private static final long serialVersionUID = 1L;
+
+  private String namespaceId;
+
+  public NamespaceCleanUp(String namespaceId) {
+    this.namespaceId = namespaceId;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long id, Master master) throws Exception {
+
+    // remove from zookeeper
+    try {
+      TableManager.getInstance().removeNamespace(namespaceId);
+    } catch (Exception e) {
+      log.error("Failed to find namespace in zookeeper", e);
+    }
+    Tables.clearCache(master.getInstance());
+
+    // remove any permissions associated with this namespace
+    try {
+      AuditedSecurityOperation.getInstance(master).deleteNamespace(master.rpcCreds(), namespaceId);
+    } catch (ThriftSecurityException e) {
+      log.error("{}", e.getMessage(), e);
+    }
+
+    Utils.unreserveNamespace(namespaceId, id, true);
+
+    log.debug("Deleted namespace " + namespaceId);
+
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    // nothing to do
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
new file mode 100644
index 0000000..f696198
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.iterators.user.GrepIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.volume.Volume;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.server.master.state.TabletState;
+import org.apache.accumulo.server.problems.ProblemReports;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CleanUp extends MasterRepo {
+
+  private static final Logger log = LoggerFactory.getLogger(CleanUp.class);
+
+  private static final long serialVersionUID = 1L;
+
+  private String tableId, namespaceId;
+
+  private long creationTime;
+
+  private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
+    in.defaultReadObject();
+
+    /*
+     * handle the case where we start executing on a new machine where the current time is in the past relative to the previous machine
+     *
+     * if the new machine has time in the future, that will work ok w/ hasCycled
+     */
+    if (System.currentTimeMillis() < creationTime) {
+      creationTime = System.currentTimeMillis();
+    }
+
+  }
+
+  public CleanUp(String tableId, String namespaceId) {
+    this.tableId = tableId;
+    this.namespaceId = namespaceId;
+    creationTime = System.currentTimeMillis();
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    if (!master.hasCycled(creationTime)) {
+      return 50;
+    }
+
+    boolean done = true;
+    Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
+    Scanner scanner = master.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    MetaDataTableScanner.configureScanner(scanner, master);
+    scanner.setRange(tableRange);
+
+    for (Entry<Key,Value> entry : scanner) {
+      TabletLocationState locationState = MetaDataTableScanner.createTabletLocationState(entry.getKey(), entry.getValue());
+      TabletState state = locationState.getState(master.onlineTabletServers());
+      if (state.equals(TabletState.ASSIGNED) || state.equals(TabletState.HOSTED)) {
+        log.debug("Still waiting for table to be deleted: " + tableId + " locationState: " + locationState);
+        done = false;
+        break;
+      }
+    }
+
+    if (!done)
+      return 50;
+
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    master.clearMigrations(tableId);
+
+    int refCount = 0;
+
+    try {
+      // look for other tables that references this table's files
+      Connector conn = master.getConnector();
+      BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
+      try {
+        Range allTables = MetadataSchema.TabletsSection.getRange();
+        Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
+        Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
+        Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
+        bs.setRanges(Arrays.asList(beforeTable, afterTable));
+        bs.fetchColumnFamily(DataFileColumnFamily.NAME);
+        IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
+        GrepIterator.setTerm(cfg, "/" + tableId + "/");
+        bs.addScanIterator(cfg);
+
+        for (Entry<Key,Value> entry : bs) {
+          if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
+            refCount++;
+          }
+        }
+      } finally {
+        bs.close();
+      }
+
+    } catch (Exception e) {
+      refCount = -1;
+      log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
+    }
+
+    // remove metadata table entries
+    try {
+      // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
+      // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
+      // are dropped and the operation completes, then the deletes will not be repeated.
+      MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
+    } catch (Exception e) {
+      log.error("error deleting " + tableId + " from metadata table", e);
+    }
+
+    // remove any problem reports the table may have
+    try {
+      ProblemReports.getInstance(master).deleteProblemReports(tableId);
+    } catch (Exception e) {
+      log.error("Failed to delete problem reports for table " + tableId, e);
+    }
+
+    if (refCount == 0) {
+      final AccumuloConfiguration conf = master.getConfiguration();
+      boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
+
+      // delete the map files
+      try {
+        VolumeManager fs = master.getFileSystem();
+        for (String dir : ServerConstants.getTablesDirs()) {
+          if (archiveFiles) {
+            archiveFile(fs, dir, tableId);
+          } else {
+            fs.deleteRecursively(new Path(dir, tableId));
+          }
+        }
+      } catch (IOException e) {
+        log.error("Unable to remove deleted table directory", e);
+      } catch (IllegalArgumentException exception) {
+        if (exception.getCause() instanceof UnknownHostException) {
+          /* Thrown if HDFS encounters a DNS problem in some edge cases */
+          log.error("Unable to remove deleted table directory", exception);
+        } else {
+          throw exception;
+        }
+      }
+    }
+
+    // remove table from zookeeper
+    try {
+      TableManager.getInstance().removeTable(tableId);
+      Tables.clearCache(master.getInstance());
+    } catch (Exception e) {
+      log.error("Failed to find table id in zookeeper", e);
+    }
+
+    // remove any permissions associated with this table
+    try {
+      AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
+    } catch (ThriftSecurityException e) {
+      log.error("{}", e.getMessage(), e);
+    }
+
+    Utils.unreserveTable(tableId, tid, true);
+    Utils.unreserveNamespace(namespaceId, tid, false);
+
+    LoggerFactory.getLogger(CleanUp.class).debug("Deleted table " + tableId);
+
+    return null;
+  }
+
+  protected void archiveFile(VolumeManager fs, String dir, String tableId) throws IOException {
+    Path tableDirectory = new Path(dir, tableId);
+    Volume v = fs.getVolumeByPath(tableDirectory);
+    String basePath = v.getBasePath();
+
+    // Path component of URI
+    String tableDirPath = tableDirectory.toUri().getPath();
+
+    // Just the suffix of the path (after the Volume's base path)
+    String tableDirSuffix = tableDirPath.substring(basePath.length());
+
+    // Remove a leading path separator char because Path will treat the "child" as an absolute path with it
+    if (Path.SEPARATOR_CHAR == tableDirSuffix.charAt(0)) {
+      if (tableDirSuffix.length() > 1) {
+        tableDirSuffix = tableDirSuffix.substring(1);
+      } else {
+        tableDirSuffix = "";
+      }
+    }
+
+    // Get the file archive directory on this volume
+    final Path fileArchiveDir = new Path(basePath, ServerConstants.FILE_ARCHIVE_DIR);
+
+    // Make sure it exists just to be safe
+    fs.mkdirs(fileArchiveDir);
+
+    // The destination to archive this table to
+    final Path destTableDir = new Path(fileArchiveDir, tableDirSuffix);
+
+    log.debug("Archiving " + tableDirectory + " to " + tableDirectory);
+
+    if (fs.exists(destTableDir)) {
+      merge(fs, tableDirectory, destTableDir);
+    } else {
+      fs.rename(tableDirectory, destTableDir);
+    }
+  }
+
+  protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
+    for (FileStatus child : fs.listStatus(src)) {
+      final String childName = child.getPath().getName();
+      final Path childInSrc = new Path(src, childName), childInDest = new Path(dest, childName);
+
+      if (child.isFile()) {
+        if (fs.exists(childInDest)) {
+          log.warn("File already exists in archive, ignoring. " + childInDest);
+        } else {
+          fs.rename(childInSrc, childInDest);
+        }
+      } else if (child.isDirectory()) {
+        if (fs.exists(childInDest)) {
+          // Recurse
+          merge(fs, childInSrc, childInDest);
+        } else {
+          fs.rename(childInSrc, childInDest);
+        }
+      } else {
+        // Symlinks shouldn't exist in table directories.
+        log.warn("Ignoring archiving of non file/directory: " + child);
+      }
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    // nothing to do
+  }
+
+}
\ No newline at end of file

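merge() above folds a table directory into an archive directory that already exists: anything that does not collide is renamed across, directories that do collide are merged recursively, and colliding files are skipped with a warning. The same strategy restated against plain java.nio.file for illustration (local paths instead of the VolumeManager, and a move only works as shown when both sides are on the same filesystem):

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class MergeSketch {
      // Mirror of CleanUp.merge(): move what we can, recurse where the destination already exists.
      static void merge(Path src, Path dest) throws IOException {
        try (DirectoryStream<Path> children = Files.newDirectoryStream(src)) {
          for (Path child : children) {
            Path target = dest.resolve(child.getFileName());
            if (Files.isDirectory(child) && Files.exists(target)) {
              merge(child, target); // both sides have the directory: merge its contents
            } else if (!Files.exists(target)) {
              Files.move(child, target); // no collision: move the file or whole subtree
            }
            // colliding files are left in place, matching the "already exists in archive" warning above
          }
        }
      }

      public static void main(String[] args) throws IOException {
        merge(Paths.get("/tmp/src-table"), Paths.get("/tmp/archived-table")); // illustrative paths
      }
    }
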
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java
new file mode 100644
index 0000000..85f9a8c
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUpBulkImport.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CleanUpBulkImport extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private static final Logger log = LoggerFactory.getLogger(CleanUpBulkImport.class);
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String error;
+
+  public CleanUpBulkImport(String tableId, String source, String bulk, String error) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.error = error;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    log.debug("removing the bulk processing flag file in " + bulk);
+    Path bulkDir = new Path(bulk);
+    MetadataTableUtil.removeBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
+    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
+    log.debug("removing the metadata table markers for loaded files");
+    Connector conn = master.getConnector();
+    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
+    log.debug("releasing HDFS reservations for " + source + " and " + error);
+    Utils.unreserveHdfsDirectory(source, tid);
+    Utils.unreserveHdfsDirectory(error, tid);
+    Utils.getReadLock(tableId, tid).unlock();
+    log.debug("completing bulk import transaction " + tid);
+    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
+    return null;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java
new file mode 100644
index 0000000..335d65d
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneInfo.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.Set;
+
+class CloneInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  String srcTableId;
+  String tableName;
+  String tableId;
+  String namespaceId;
+  String srcNamespaceId;
+  Map<String,String> propertiesToSet;
+  Set<String> propertiesToExclude;
+
+  public String user;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
new file mode 100644
index 0000000..045f6b1
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneMetadata.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.slf4j.LoggerFactory;
+
+class CloneMetadata extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private CloneInfo cloneInfo;
+
+  public CloneMetadata(CloneInfo cloneInfo) {
+    this.cloneInfo = cloneInfo;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    LoggerFactory.getLogger(CloneMetadata.class).info(
+        String.format("Cloning %s with tableId %s from srcTableId %s", cloneInfo.tableName, cloneInfo.tableId, cloneInfo.srcTableId));
+    // need to clear out any metadata entries for tableId just in case this
+    // died before and is executing again
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
+    MetadataTableUtil.cloneTable(environment, cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem());
+    return new FinishCloneTable(cloneInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
new file mode 100644
index 0000000..3572c31
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ClonePermissions.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.NamespaceNotFoundException;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class ClonePermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private CloneInfo cloneInfo;
+
+  public ClonePermissions(CloneInfo cloneInfo) {
+    this.cloneInfo = cloneInfo;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    // give all table permissions to the creator
+    for (TablePermission permission : TablePermission.values()) {
+      try {
+        AuditedSecurityOperation.getInstance(environment).grantTablePermission(environment.rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission,
+            cloneInfo.namespaceId);
+      } catch (ThriftSecurityException e) {
+        LoggerFactory.getLogger(FinishCloneTable.class).error("{}", e.getMessage(), e);
+        throw e;
+      }
+    }
+
+    // setup permissions in zookeeper before table info in zookeeper
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    try {
+      return new CloneZookeeper(cloneInfo);
+    } catch (NamespaceNotFoundException e) {
+      throw new ThriftTableOperationException(null, cloneInfo.tableName, TableOperation.CLONE, TableOperationExceptionType.NAMESPACE_NOTFOUND,
+          "Namespace for target table not found");
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    AuditedSecurityOperation.getInstance(environment).deleteTable(environment.rpcCreds(), cloneInfo.tableId, cloneInfo.namespaceId);
+  }
+}
\ No newline at end of file

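ClonePermissions grants the creator every TablePermission through the server-side SecurityOperation before handing off to the rest of the clone chain (CloneZookeeper, then CloneMetadata, then FinishCloneTable). For comparison, the equivalent grant loop through the public client API looks like this (the Connector, user, and table name are placeholders):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.security.TablePermission;

    public class GrantAllSketch {
      // Grant every table permission to one user via the client-side API.
      static void grantAll(Connector conn, String user, String table) throws Exception {
        for (TablePermission permission : TablePermission.values()) {
          conn.securityOperations().grantTablePermission(user, table, permission);
        }
      }
    }
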
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
index 192d182..eb2370e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
@@ -16,209 +16,14 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.Serializable;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.accumulo.core.client.NamespaceNotFoundException;
-import org.apache.accumulo.core.client.impl.Namespaces;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.slf4j.LoggerFactory;
-
-class CloneInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  String srcTableId;
-  String tableName;
-  String tableId;
-  String namespaceId;
-  String srcNamespaceId;
-  Map<String,String> propertiesToSet;
-  Set<String> propertiesToExclude;
-
-  public String user;
-}
-
-class FinishCloneTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private CloneInfo cloneInfo;
-
-  public FinishCloneTable(CloneInfo cloneInfo) {
-    this.cloneInfo = cloneInfo;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    // directories are intentionally not created.... this is done because directories should be unique
-    // because they occupy a different namespace than normal tablet directories... also some clones
-    // may never create files.. therefore there is no need to consume namenode space w/ directories
-    // that are not used... tablet will create directories as needed
-
-    TableManager.getInstance().transitionTableState(cloneInfo.tableId, TableState.ONLINE);
-
-    Utils.unreserveNamespace(cloneInfo.srcNamespaceId, tid, false);
-    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
-    Utils.unreserveTable(cloneInfo.srcTableId, tid, false);
-    Utils.unreserveTable(cloneInfo.tableId, tid, true);
-
-    environment.getEventCoordinator().event("Cloned table %s from %s", cloneInfo.tableName, cloneInfo.srcTableId);
-
-    LoggerFactory.getLogger(FinishCloneTable.class).debug("Cloned table " + cloneInfo.srcTableId + " " + cloneInfo.tableId + " " + cloneInfo.tableName);
-
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {}
-
-}
-
-class CloneMetadata extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private CloneInfo cloneInfo;
-
-  public CloneMetadata(CloneInfo cloneInfo) {
-    this.cloneInfo = cloneInfo;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    LoggerFactory.getLogger(CloneMetadata.class).info(
-        String.format("Cloning %s with tableId %s from srcTableId %s", cloneInfo.tableName, cloneInfo.tableId, cloneInfo.srcTableId));
-    // need to clear out any metadata entries for tableId just in case this
-    // died before and is executing again
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
-    MetadataTableUtil.cloneTable(environment, cloneInfo.srcTableId, cloneInfo.tableId, environment.getFileSystem());
-    return new FinishCloneTable(cloneInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, environment, environment.getMasterLock());
-  }
-
-}
-
-class CloneZookeeper extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private CloneInfo cloneInfo;
-
-  public CloneZookeeper(CloneInfo cloneInfo) throws NamespaceNotFoundException {
-    this.cloneInfo = cloneInfo;
-    this.cloneInfo.namespaceId = Namespaces.getNamespaceId(HdfsZooInstance.getInstance(), Tables.qualify(this.cloneInfo.tableName).getFirst());
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    long val = 0;
-    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      val += Utils.reserveNamespace(cloneInfo.namespaceId, tid, false, true, TableOperation.CLONE);
-    val += Utils.reserveTable(cloneInfo.tableId, tid, true, false, TableOperation.CLONE);
-    return val;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    Utils.tableNameLock.lock();
-    try {
-      // write tableName & tableId to zookeeper
-
-      Utils.checkTableDoesNotExist(environment.getInstance(), cloneInfo.tableName, cloneInfo.tableId, TableOperation.CLONE);
-
-      TableManager.getInstance().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId, cloneInfo.tableName, cloneInfo.namespaceId, cloneInfo.propertiesToSet,
-          cloneInfo.propertiesToExclude, NodeExistsPolicy.OVERWRITE);
-      Tables.clearCache(environment.getInstance());
-
-      return new CloneMetadata(cloneInfo);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    TableManager.getInstance().removeTable(cloneInfo.tableId);
-    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
-    Utils.unreserveTable(cloneInfo.tableId, tid, true);
-    Tables.clearCache(environment.getInstance());
-  }
-
-}
-
-class ClonePermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private CloneInfo cloneInfo;
-
-  public ClonePermissions(CloneInfo cloneInfo) {
-    this.cloneInfo = cloneInfo;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    // give all table permissions to the creator
-    for (TablePermission permission : TablePermission.values()) {
-      try {
-        AuditedSecurityOperation.getInstance(environment).grantTablePermission(environment.rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission,
-            cloneInfo.namespaceId);
-      } catch (ThriftSecurityException e) {
-        LoggerFactory.getLogger(FinishCloneTable.class).error("{}", e.getMessage(), e);
-        throw e;
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    try {
-      return new CloneZookeeper(cloneInfo);
-    } catch (NamespaceNotFoundException e) {
-      throw new ThriftTableOperationException(null, cloneInfo.tableName, TableOperation.CLONE, TableOperationExceptionType.NAMESPACE_NOTFOUND,
-          "Namespace for target table not found");
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    AuditedSecurityOperation.getInstance(environment).deleteTable(environment.rpcCreds(), cloneInfo.tableId, cloneInfo.namespaceId);
-  }
-}
 
 public class CloneTable extends MasterRepo {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
new file mode 100644
index 0000000..072f5de
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.NamespaceNotFoundException;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.tables.TableManager;
+
+class CloneZookeeper extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private CloneInfo cloneInfo;
+
+  public CloneZookeeper(CloneInfo cloneInfo) throws NamespaceNotFoundException {
+    this.cloneInfo = cloneInfo;
+    this.cloneInfo.namespaceId = Namespaces.getNamespaceId(HdfsZooInstance.getInstance(), Tables.qualify(this.cloneInfo.tableName).getFirst());
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    long val = 0;
+    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
+      val += Utils.reserveNamespace(cloneInfo.namespaceId, tid, false, true, TableOperation.CLONE);
+    val += Utils.reserveTable(cloneInfo.tableId, tid, true, false, TableOperation.CLONE);
+    return val;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    Utils.tableNameLock.lock();
+    try {
+      // write tableName & tableId to zookeeper
+
+      Utils.checkTableDoesNotExist(environment.getInstance(), cloneInfo.tableName, cloneInfo.tableId, TableOperation.CLONE);
+
+      TableManager.getInstance().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId, cloneInfo.tableName, cloneInfo.namespaceId, cloneInfo.propertiesToSet,
+          cloneInfo.propertiesToExclude, NodeExistsPolicy.OVERWRITE);
+      Tables.clearCache(environment.getInstance());
+
+      return new CloneMetadata(cloneInfo);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    TableManager.getInstance().removeTable(cloneInfo.tableId);
+    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
+      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
+    Utils.unreserveTable(cloneInfo.tableId, tid, true);
+    Tables.clearCache(environment.getInstance());
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index 133663d..befaea3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -18,188 +18,29 @@ package org.apache.accumulo.master.tableOps;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
-import java.util.Collections;
-import java.util.Iterator;
 import java.util.List;
-import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.RowIterator;
-import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
 import org.apache.accumulo.core.client.impl.CompactionStrategyConfigUtil;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MapCounter;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
-import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.tableOps.UserCompactionConfig;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.thrift.TException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 
-class CompactionDriver extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private long compactId;
-  private final String tableId;
-  private byte[] startRow;
-  private byte[] endRow;
-
-  public CompactionDriver(long compactId, String tableId, byte[] startRow, byte[] endRow) {
-
-    this.compactId = compactId;
-    this.tableId = tableId;
-    this.startRow = startRow;
-    this.endRow = endRow;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-
-    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
-
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-
-    if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
-      // compaction was canceled
-      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OTHER, "Compaction canceled");
-    }
-
-    MapCounter<TServerInstance> serversToFlush = new MapCounter<TServerInstance>();
-    Connector conn = master.getConnector();
-
-    Scanner scanner;
-
-    if (tableId.equals(MetadataTable.ID)) {
-      scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
-      scanner.setRange(MetadataSchema.TabletsSection.getRange());
-    } else {
-      scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
-      Range range = new KeyExtent(new Text(tableId), null, startRow == null ? null : new Text(startRow)).toMetadataRange();
-      scanner.setRange(range);
-    }
-
-    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
-    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
-
-    long t1 = System.currentTimeMillis();
-    RowIterator ri = new RowIterator(scanner);
-
-    int tabletsToWaitFor = 0;
-    int tabletCount = 0;
-
-    while (ri.hasNext()) {
-      Iterator<Entry<Key,Value>> row = ri.next();
-      long tabletCompactID = -1;
-
-      TServerInstance server = null;
-
-      Entry<Key,Value> entry = null;
-      while (row.hasNext()) {
-        entry = row.next();
-        Key key = entry.getKey();
-
-        if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
-          tabletCompactID = Long.parseLong(entry.getValue().toString());
-
-        if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
-          server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
-      }
-
-      if (tabletCompactID < compactId) {
-        tabletsToWaitFor++;
-        if (server != null)
-          serversToFlush.increment(server, 1);
-      }
-
-      tabletCount++;
-
-      Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
-      if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
-        break;
-    }
-
-    long scanTime = System.currentTimeMillis() - t1;
-
-    Instance instance = master.getInstance();
-    Tables.clearCache(instance);
-    if (tabletCount == 0 && !Tables.exists(instance, tableId))
-      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
-
-    if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
-      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OFFLINE, null);
-
-    if (tabletsToWaitFor == 0)
-      return 0;
-
-    for (TServerInstance tsi : serversToFlush.keySet()) {
-      try {
-        final TServerConnection server = master.getConnection(tsi);
-        if (server != null)
-          server.compact(master.getMasterLock(), tableId, startRow, endRow);
-      } catch (TException ex) {
-        LoggerFactory.getLogger(CompactionDriver.class).error(ex.toString());
-      }
-    }
-
-    long sleepTime = 500;
-
-    if (serversToFlush.size() > 0)
-      sleepTime = Collections.max(serversToFlush.values()) * sleepTime; // make wait time depend on the server with the most to
-                                                                        // compact
-
-    sleepTime = Math.max(2 * scanTime, sleepTime);
-
-    sleepTime = Math.min(sleepTime, 30000);
-
-    return sleepTime;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
-    CompactRange.removeIterators(environment, tid, tableId);
-    Utils.getReadLock(tableId, tid).unlock();
-    Utils.getReadLock(namespaceId, tid).unlock();
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-
-  }
-
-}
-
 public class CompactRange extends MasterRepo {
 
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
new file mode 100644
index 0000000..e3d0820
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MapCounter;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+import org.slf4j.LoggerFactory;
+
+class CompactionDriver extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private long compactId;
+  private final String tableId;
+  private byte[] startRow;
+  private byte[] endRow;
+
+  public CompactionDriver(long compactId, String tableId, byte[] startRow, byte[] endRow) {
+
+    this.compactId = compactId;
+    this.tableId = tableId;
+    this.startRow = startRow;
+    this.endRow = endRow;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+
+    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
+
+    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+
+    if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
+      // compaction was canceled
+      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OTHER, "Compaction canceled");
+    }
+
+    MapCounter<TServerInstance> serversToFlush = new MapCounter<TServerInstance>();
+    Connector conn = master.getConnector();
+
+    Scanner scanner;
+
+    if (tableId.equals(MetadataTable.ID)) {
+      scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
+      scanner.setRange(MetadataSchema.TabletsSection.getRange());
+    } else {
+      scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+      Range range = new KeyExtent(new Text(tableId), null, startRow == null ? null : new Text(startRow)).toMetadataRange();
+      scanner.setRange(range);
+    }
+
+    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+
+    long t1 = System.currentTimeMillis();
+    RowIterator ri = new RowIterator(scanner);
+
+    int tabletsToWaitFor = 0;
+    int tabletCount = 0;
+
+    while (ri.hasNext()) {
+      Iterator<Entry<Key,Value>> row = ri.next();
+      long tabletCompactID = -1;
+
+      TServerInstance server = null;
+
+      Entry<Key,Value> entry = null;
+      while (row.hasNext()) {
+        entry = row.next();
+        Key key = entry.getKey();
+
+        if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+          tabletCompactID = Long.parseLong(entry.getValue().toString());
+
+        if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
+          server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
+      }
+
+      if (tabletCompactID < compactId) {
+        tabletsToWaitFor++;
+        if (server != null)
+          serversToFlush.increment(server, 1);
+      }
+
+      tabletCount++;
+
+      Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
+      if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
+        break;
+    }
+
+    long scanTime = System.currentTimeMillis() - t1;
+
+    Instance instance = master.getInstance();
+    Tables.clearCache(instance);
+    if (tabletCount == 0 && !Tables.exists(instance, tableId))
+      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
+
+    if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
+      throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OFFLINE, null);
+
+    if (tabletsToWaitFor == 0)
+      return 0;
+
+    for (TServerInstance tsi : serversToFlush.keySet()) {
+      try {
+        final TServerConnection server = master.getConnection(tsi);
+        if (server != null)
+          server.compact(master.getMasterLock(), tableId, startRow, endRow);
+      } catch (TException ex) {
+        LoggerFactory.getLogger(CompactionDriver.class).error(ex.toString());
+      }
+    }
+
+    long sleepTime = 500;
+
+    if (serversToFlush.size() > 0)
+      sleepTime = Collections.max(serversToFlush.values()) * sleepTime; // make wait time depend on the server with the most to
+                                                                        // compact
+
+    sleepTime = Math.max(2 * scanTime, sleepTime);
+
+    sleepTime = Math.min(sleepTime, 30000);
+
+    return sleepTime;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    String namespaceId = Tables.getNamespaceId(environment.getInstance(), tableId);
+    CompactRange.removeIterators(environment, tid, tableId);
+    Utils.getReadLock(tableId, tid).unlock();
+    Utils.getReadLock(namespaceId, tid).unlock();
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+
+  }
+
+}
\ No newline at end of file
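
The value returned from isReady() above is the FATE retry delay. Restated as a
standalone helper (backoffMillis is a hypothetical name; the arithmetic matches
the code above): wait in proportion to the tablet count on the busiest server,
but at least twice the metadata scan time and never more than 30 seconds.

    class CompactionBackoffSketch {
      // Mirrors the sleepTime computation at the end of CompactionDriver.isReady().
      static long backoffMillis(long scanTimeMs, long maxTabletsOnBusiestServer) {
        long sleep = 500;                            // base delay, in milliseconds
        if (maxTabletsOnBusiestServer > 0)
          sleep = maxTabletsOnBusiestServer * sleep; // scale with the busiest server
        sleep = Math.max(2 * scanTimeMs, sleep);     // at least twice the scan time
        return Math.min(sleep, 30000);               // capped at 30 seconds
      }
    }

For example, backoffMillis(200, 12) yields 6000 ms, while a 20-second scan is
still capped: backoffMillis(20000, 1) yields 30000 ms.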

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java
new file mode 100644
index 0000000..8905c80
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompleteBulkImport.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
+
+class CompleteBulkImport extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String error;
+
+  public CompleteBulkImport(String tableId, String source, String bulk, String error) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.error = error;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
+    return new CopyFailed(tableId, source, bulk, error);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java
new file mode 100644
index 0000000..e0cc8ec
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CopyFailed.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+
+class CopyFailed extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String error;
+
+  public CopyFailed(String tableId, String source, String bulk, String error) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.error = error;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    Set<TServerInstance> finished = new HashSet<TServerInstance>();
+    Set<TServerInstance> running = master.onlineTabletServers();
+    for (TServerInstance server : running) {
+      try {
+        TServerConnection client = master.getConnection(server);
+        if (client != null && !client.isActive(tid))
+          finished.add(server);
+      } catch (TException ex) {
+        log.info("Ignoring error trying to check on tid " + tid + " from server " + server + ": " + ex);
+      }
+    }
+    if (finished.containsAll(running))
+      return 0;
+    return 500;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    // This needs to execute after the arbiter is stopped
+
+    VolumeManager fs = master.getFileSystem();
+
+    if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
+      return new CleanUpBulkImport(tableId, source, bulk, error);
+
+    HashMap<FileRef,String> failures = new HashMap<FileRef,String>();
+    HashMap<FileRef,String> loadedFailures = new HashMap<FileRef,String>();
+
+    try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(error, BulkImport.FAILURES_TXT)), UTF_8))) {
+      String line = null;
+      while ((line = in.readLine()) != null) {
+        Path path = new Path(line);
+        if (!fs.exists(new Path(error, path.getName())))
+          failures.put(new FileRef(line, path), line);
+      }
+    }
+
+    /*
+     * I thought I could move files that have no file references in the table. However it's possible a clone references a file. Therefore only move files that
+     * have no loaded markers.
+     */
+
+    // determine which failed files were loaded
+    Connector conn = master.getConnector();
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
+
+    for (Entry<Key,Value> entry : mscanner) {
+      if (Long.parseLong(entry.getValue().toString()) == tid) {
+        FileRef loadedFile = new FileRef(fs, entry.getKey());
+        String absPath = failures.remove(loadedFile);
+        if (absPath != null) {
+          loadedFailures.put(loadedFile, absPath);
+        }
+      }
+    }
+
+    // move failed files that were not loaded
+    for (String failure : failures.values()) {
+      Path orig = new Path(failure);
+      Path dest = new Path(error, orig.getName());
+      fs.rename(orig, dest);
+      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
+    }
+
+    if (loadedFailures.size() > 0) {
+      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ,
+          master.getConfiguration());
+
+      HashSet<String> workIds = new HashSet<String>();
+
+      for (String failure : loadedFailures.values()) {
+        Path orig = new Path(failure);
+        Path dest = new Path(error, orig.getName());
+
+        if (fs.exists(dest))
+          continue;
+
+        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(UTF_8));
+        workIds.add(orig.getName());
+        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
+      }
+
+      bifCopyQueue.waitUntilDone(workIds);
+    }
+
+    fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
+    return new CleanUpBulkImport(tableId, source, bulk, error);
+  }
+
+}
\ No newline at end of file
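
The block comment in call() above carries the key invariant: a failed file may
still be referenced by a clone, so only files with no loaded markers are safe
to rename into the error directory; the rest go to the distributed copy queue.
A simplified sketch of that split, using plain maps instead of Accumulo's
FileRef and metadata scan:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    class FailedFileSplitSketch {
      // Partition failed files: entries with a loaded marker move into the
      // returned map (to be copied); whatever remains in 'failures' has no
      // loaded marker and can simply be renamed.
      static Map<String,String> splitLoadedFailures(Map<String,String> failures, Set<String> loadedMarkers) {
        Map<String,String> loadedFailures = new HashMap<>();
        for (String file : loadedMarkers) {
          String absPath = failures.remove(file);
          if (absPath != null)
            loadedFailures.put(file, absPath);
        }
        return loadedFailures;
      }
    }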

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java
new file mode 100644
index 0000000..6221624
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateDir.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.Path;
+
+class CreateDir extends MasterRepo {
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  CreateDir(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    VolumeManager fs = master.getFileSystem();
+    fs.mkdirs(new Path(tableInfo.dir));
+    return new PopulateMetadata(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+    VolumeManager fs = master.getFileSystem();
+    fs.deleteRecursively(new Path(tableInfo.dir));
+
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
new file mode 100644
index 0000000..4f0e7f8
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.util.Arrays;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.tablets.UniqueNameAllocator;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CreateImportDir extends MasterRepo {
+  private static final Logger log = LoggerFactory.getLogger(CreateImportDir.class);
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  CreateImportDir(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+
+    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+
+    Path exportDir = new Path(tableInfo.exportDir);
+    String[] tableDirs = ServerConstants.getTablesDirs();
+
+    log.info("Looking for matching filesystem for " + exportDir + " from options " + Arrays.toString(tableDirs));
+    Path base = master.getFileSystem().matchingFileSystem(exportDir, tableDirs);
+    log.info("Chose base table directory of " + base);
+    Path directory = new Path(base, tableInfo.tableId);
+
+    Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
+
+    tableInfo.importDir = newBulkDir.toString();
+
+    log.info("Using import dir: " + tableInfo.importDir);
+
+    return new MapImportFileNames(tableInfo);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
index 9264031..b01fbcc 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateNamespace.java
@@ -16,147 +16,10 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.Serializable;
 import java.util.Map;
-import java.util.Map.Entry;
 
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.security.NamespacePermission;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.util.NamespacePropUtil;
-import org.slf4j.LoggerFactory;
-
-class NamespaceInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  String namespaceName;
-  String namespaceId;
-  String user;
-
-  public Map<String,String> props;
-}
-
-class FinishCreateNamespace extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private NamespaceInfo namespaceInfo;
-
-  public FinishCreateNamespace(NamespaceInfo ti) {
-    this.namespaceInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long id, Master env) throws Exception {
-
-    Utils.unreserveNamespace(namespaceInfo.namespaceId, id, true);
-
-    env.getEventCoordinator().event("Created namespace %s ", namespaceInfo.namespaceName);
-
-    LoggerFactory.getLogger(FinishCreateNamespace.class).debug("Created table " + namespaceInfo.namespaceId + " " + namespaceInfo.namespaceName);
-
-    return null;
-  }
-
-  @Override
-  public String getReturn() {
-    return namespaceInfo.namespaceId;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {}
-
-}
-
-class PopulateZookeeperWithNamespace extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private NamespaceInfo namespaceInfo;
-
-  PopulateZookeeperWithNamespace(NamespaceInfo ti) {
-    this.namespaceInfo = ti;
-  }
-
-  @Override
-  public long isReady(long id, Master environment) throws Exception {
-    return Utils.reserveNamespace(namespaceInfo.namespaceId, id, true, false, TableOperation.CREATE);
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    Utils.tableNameLock.lock();
-    try {
-      Instance instance = master.getInstance();
-
-      Utils.checkNamespaceDoesNotExist(instance, namespaceInfo.namespaceName, namespaceInfo.namespaceId, TableOperation.CREATE);
-
-      TableManager.prepareNewNamespaceState(instance.getInstanceID(), namespaceInfo.namespaceId, namespaceInfo.namespaceName, NodeExistsPolicy.OVERWRITE);
-
-      for (Entry<String,String> entry : namespaceInfo.props.entrySet())
-        NamespacePropUtil.setNamespaceProperty(namespaceInfo.namespaceId, entry.getKey(), entry.getValue());
-
-      Tables.clearCache(instance);
-
-      return new FinishCreateNamespace(namespaceInfo);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-    TableManager.getInstance().removeNamespace(namespaceInfo.namespaceId);
-    Tables.clearCache(master.getInstance());
-    Utils.unreserveNamespace(namespaceInfo.namespaceId, tid, true);
-  }
-
-}
-
-class SetupNamespacePermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private NamespaceInfo namespaceInfo;
-
-  public SetupNamespacePermissions(NamespaceInfo ti) {
-    this.namespaceInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // give all namespace permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
-    for (NamespacePermission permission : NamespacePermission.values()) {
-      try {
-        security.grantNamespacePermission(env.rpcCreds(), namespaceInfo.user, namespaceInfo.namespaceId, permission);
-      } catch (ThriftSecurityException e) {
-        LoggerFactory.getLogger(FinishCreateNamespace.class).error("{}", e.getMessage(), e);
-        throw e;
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    return new PopulateZookeeperWithNamespace(namespaceInfo);
-  }
-}
 
 public class CreateNamespace extends MasterRepo {
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index 9436704..ea2e395 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -16,264 +16,13 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.Serializable;
 import java.util.Map;
-import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.tablets.TabletTime;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.util.TablePropUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-
-class TableInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  String tableName;
-  String tableId;
-  String namespaceId;
-  char timeType;
-  String user;
-
-  public Map<String,String> props;
-
-  public String dir = null;
-}
-
-class FinishCreateTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  public FinishCreateTable(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
-
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-
-    env.getEventCoordinator().event("Created table %s ", tableInfo.tableName);
-
-    LoggerFactory.getLogger(FinishCreateTable.class).debug("Created table " + tableInfo.tableId + " " + tableInfo.tableName);
-
-    return null;
-  }
-
-  @Override
-  public String getReturn() {
-    return tableInfo.tableId;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {}
-
-}
-
-class PopulateMetadata extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  PopulateMetadata(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
-    MetadataTableUtil.addTablet(extent, tableInfo.dir, environment, tableInfo.timeType, environment.getMasterLock());
-
-    return new FinishCreateTable(tableInfo);
-
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
-  }
-
-}
-
-class CreateDir extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  CreateDir(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    VolumeManager fs = master.getFileSystem();
-    fs.mkdirs(new Path(tableInfo.dir));
-    return new PopulateMetadata(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-    VolumeManager fs = master.getFileSystem();
-    fs.deleteRecursively(new Path(tableInfo.dir));
-
-  }
-}
-
-class ChooseDir extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  ChooseDir(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add one here
-    tableInfo.dir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
-        + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
-    return new CreateDir(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-
-  }
-}
-
-class PopulateZookeeper extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  PopulateZookeeper(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.CREATE);
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    // reserve the table name in zookeeper or fail
-
-    Utils.tableNameLock.lock();
-    try {
-      // write tableName & tableId to zookeeper
-      Instance instance = master.getInstance();
-
-      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
-
-      TableManager.getInstance().addTable(tableInfo.tableId, tableInfo.namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
-
-      for (Entry<String,String> entry : tableInfo.props.entrySet())
-        TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue());
-
-      Tables.clearCache(instance);
-      return new ChooseDir(tableInfo);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-
-  }
-
-  @Override
-  public void undo(long tid, Master master) throws Exception {
-    Instance instance = master.getInstance();
-    TableManager.getInstance().removeTable(tableInfo.tableId);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-    Tables.clearCache(instance);
-  }
-
-}
-
-class SetupPermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private TableInfo tableInfo;
-
-  public SetupPermissions(TableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
-    if (!tableInfo.user.equals(env.getCredentials().getPrincipal())) {
-      for (TablePermission permission : TablePermission.values()) {
-        try {
-          security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
-        } catch (ThriftSecurityException e) {
-          LoggerFactory.getLogger(FinishCreateTable.class).error("{}", e.getMessage(), e);
-          throw e;
-        }
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    return new PopulateZookeeper(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
-  }
-
-}
 
 public class CreateTable extends MasterRepo {
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
index 3aa3719..f84671e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
@@ -16,64 +16,9 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-class NamespaceCleanUp extends MasterRepo {
-
-  final private static Logger log = LoggerFactory.getLogger(CleanUp.class);
-
-  private static final long serialVersionUID = 1L;
-
-  private String namespaceId;
-
-  public NamespaceCleanUp(String namespaceId) {
-    this.namespaceId = namespaceId;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long id, Master master) throws Exception {
-
-    // remove from zookeeper
-    try {
-      TableManager.getInstance().removeNamespace(namespaceId);
-    } catch (Exception e) {
-      log.error("Failed to find namespace in zookeeper", e);
-    }
-    Tables.clearCache(master.getInstance());
-
-    // remove any permissions associated with this namespace
-    try {
-      AuditedSecurityOperation.getInstance(master).deleteNamespace(master.rpcCreds(), namespaceId);
-    } catch (ThriftSecurityException e) {
-      log.error("{}", e.getMessage(), e);
-    }
-
-    Utils.unreserveNamespace(namespaceId, id, true);
-
-    LoggerFactory.getLogger(CleanUp.class).debug("Deleted namespace " + namespaceId);
-
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    // nothing to do
-  }
-
-}
 
 public class DeleteNamespace extends MasterRepo {
 


[4/9] accumulo git commit: ACCUMULO-3759 Fix Java 8 compiler warnings

Posted by ct...@apache.org.
ACCUMULO-3759 Fix Java 8 compiler warnings

* Add missing hashCode in class with equals (see the sketch below)
* Enforce one type per file
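
For context on the first bullet: overriding equals without also overriding
hashCode breaks the java.lang.Object contract (equal objects must report
equal hash codes), so such a class misbehaves in hashed collections, and
static analysis flags it. Below is a minimal, self-contained sketch of the
pattern, modeled on the EmptyToken change in TestClientOpts further down in
this message; the ValuelessToken and HashCodeContractSketch names are
hypothetical, for illustration only.

  import java.util.HashSet;
  import java.util.Set;

  public class HashCodeContractSketch {

    // A stateless type: every instance is equal to every other instance,
    // mirroring the EmptyToken test class fixed by this commit.
    static class ValuelessToken {
      @Override
      public boolean equals(Object o) {
        return o instanceof ValuelessToken;
      }

      // The fix: equal objects must report equal hash codes, so for a
      // class whose instances are all equal, a constant is the only
      // correct choice.
      @Override
      public int hashCode() {
        return 0;
      }
    }

    public static void main(String[] args) {
      Set<ValuelessToken> set = new HashSet<>();
      set.add(new ValuelessToken());
      // Without the hashCode override (i.e. with Object's identity hash),
      // this lookup would usually miss: the probe would land in a
      // different bucket even though the two objects compare equal.
      System.out.println(set.contains(new ValuelessToken())); // prints: true
    }
  }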


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/6e2e6780
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/6e2e6780
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/6e2e6780

Branch: refs/heads/1.7
Commit: 6e2e6780fc59c86112fba30a5211081bb6e77979
Parents: f996387
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Apr 28 20:30:22 2015 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Apr 28 20:30:22 2015 -0400

----------------------------------------------------------------------
 .../core/client/impl/OfflineIterator.java       | 340 ++++++++++++
 .../core/client/impl/OfflineScanner.java        | 314 -----------
 .../core/compaction/CompactionSettings.java     |  42 --
 .../accumulo/core/compaction/PatternType.java   |  28 +
 .../accumulo/core/compaction/SizeType.java      |  30 ++
 .../accumulo/core/compaction/StringType.java    |  24 +
 .../apache/accumulo/core/compaction/Type.java   |  21 +
 .../accumulo/core/compaction/UIntType.java      |  27 +
 .../core/file/DispatchingFileFactory.java       | 136 +++++
 .../accumulo/core/file/FileOperations.java      | 106 ----
 .../accumulo/core/cli/TestClientOpts.java       |   5 +
 .../client/CountingVerifyingReceiver.java       |  64 +++
 .../simple/client/RandomBatchScanner.java       |  38 --
 pom.xml                                         |   1 +
 .../accumulo/master/tableOps/BulkImport.java    | 363 -------------
 .../master/tableOps/CancelCompactions.java      |  23 -
 .../accumulo/master/tableOps/ChooseDir.java     |  53 ++
 .../accumulo/master/tableOps/CleanUp.java       | 287 ++++++++++
 .../master/tableOps/CleanUpBulkImport.java      |  64 +++
 .../accumulo/master/tableOps/CloneInfo.java     |  36 ++
 .../accumulo/master/tableOps/CloneMetadata.java |  54 ++
 .../master/tableOps/ClonePermissions.java       |  73 +++
 .../accumulo/master/tableOps/CloneTable.java    | 195 -------
 .../master/tableOps/CloneZookeeper.java         |  76 +++
 .../accumulo/master/tableOps/CompactRange.java  | 159 ------
 .../master/tableOps/CompactionDriver.java       | 188 +++++++
 .../master/tableOps/CompleteBulkImport.java     |  45 ++
 .../accumulo/master/tableOps/CopyFailed.java    | 158 ++++++
 .../accumulo/master/tableOps/CreateDir.java     |  51 ++
 .../master/tableOps/CreateImportDir.java        |  61 +++
 .../master/tableOps/CreateNamespace.java        | 137 -----
 .../accumulo/master/tableOps/CreateTable.java   | 251 ---------
 .../master/tableOps/DeleteNamespace.java        |  55 --
 .../accumulo/master/tableOps/DeleteTable.java   | 265 ----------
 .../accumulo/master/tableOps/ExportInfo.java    |  29 ++
 .../accumulo/master/tableOps/ExportTable.java   | 257 ---------
 .../master/tableOps/FinishCancelCompaction.java |  40 ++
 .../master/tableOps/FinishCloneTable.java       |  64 +++
 .../master/tableOps/FinishCreateNamespace.java  |  58 +++
 .../master/tableOps/FinishCreateTable.java      |  62 +++
 .../master/tableOps/FinishImportTable.java      |  68 +++
 .../tableOps/ImportPopulateZookeeper.java       | 104 ++++
 .../master/tableOps/ImportSetupPermissions.java |  65 +++
 .../accumulo/master/tableOps/ImportTable.java   | 521 -------------------
 .../master/tableOps/ImportedTableInfo.java      |  31 ++
 .../accumulo/master/tableOps/LoadFiles.java     | 209 ++++++++
 .../master/tableOps/MapImportFileNames.java     | 111 ++++
 .../master/tableOps/MoveExportedFiles.java      |  71 +++
 .../master/tableOps/NamespaceCleanUp.java       |  75 +++
 .../accumulo/master/tableOps/NamespaceInfo.java |  31 ++
 .../master/tableOps/PopulateMetadata.java       |  54 ++
 .../master/tableOps/PopulateMetadataTable.java  | 217 ++++++++
 .../master/tableOps/PopulateZookeeper.java      |  77 +++
 .../PopulateZookeeperWithNamespace.java         |  74 +++
 .../tableOps/SetupNamespacePermissions.java     |  55 ++
 .../master/tableOps/SetupPermissions.java       |  63 +++
 .../accumulo/master/tableOps/TableInfo.java     |  35 ++
 .../accumulo/master/tableOps/TableRangeOp.java  |  45 --
 .../master/tableOps/TableRangeOpWait.java       |  69 +++
 .../master/tableOps/WriteExportFiles.java       | 268 ++++++++++
 .../apache/accumulo/tserver/InMemoryMap.java    | 119 -----
 .../accumulo/tserver/MemKeyComparator.java      |  44 ++
 .../tserver/MemKeyConversionIterator.java       |  96 ++++
 .../PartialMutationSkippingIterator.java        |  54 ++
 .../accumulo/test/EstimateInMemMapOverhead.java | 317 -----------
 .../test/InMemoryMapMemoryUsageTest.java        | 102 ++++
 .../accumulo/test/IntObjectMemoryUsageTest.java |  65 +++
 .../apache/accumulo/test/MemoryUsageTest.java   |  64 +++
 .../accumulo/test/MutationMemoryUsageTest.java  |  98 ++++
 .../accumulo/test/TextMemoryUsageTest.java      |  82 +++
 .../accumulo/test/continuous/HistData.java      |  49 ++
 .../accumulo/test/continuous/Histogram.java     |  30 --
 72 files changed, 4406 insertions(+), 3237 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
new file mode 100644
index 0000000..b035e3e
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineIterator.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.client.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.ConfigurationCopy;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Column;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyValue;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
+import org.apache.accumulo.core.iterators.system.ColumnQualifierFilter;
+import org.apache.accumulo.core.iterators.system.DeletingIterator;
+import org.apache.accumulo.core.iterators.system.MultiIterator;
+import org.apache.accumulo.core.iterators.system.VisibilityFilter;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.LocalityGroupUtil;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.volume.VolumeConfiguration;
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
+
+class OfflineIterator implements Iterator<Entry<Key,Value>> {
+
+  static class OfflineIteratorEnvironment implements IteratorEnvironment {
+
+    private final Authorizations authorizations;
+
+    public OfflineIteratorEnvironment(Authorizations auths) {
+      this.authorizations = auths;
+    }
+
+    @Override
+    public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName) throws IOException {
+      throw new NotImplementedException();
+    }
+
+    @Override
+    public AccumuloConfiguration getConfig() {
+      return AccumuloConfiguration.getDefaultConfiguration();
+    }
+
+    @Override
+    public IteratorScope getIteratorScope() {
+      return IteratorScope.scan;
+    }
+
+    @Override
+    public boolean isFullMajorCompaction() {
+      return false;
+    }
+
+    private ArrayList<SortedKeyValueIterator<Key,Value>> topLevelIterators = new ArrayList<SortedKeyValueIterator<Key,Value>>();
+
+    @Override
+    public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
+      topLevelIterators.add(iter);
+    }
+
+    @Override
+    public Authorizations getAuthorizations() {
+      return authorizations;
+    }
+
+    SortedKeyValueIterator<Key,Value> getTopLevelIterator(SortedKeyValueIterator<Key,Value> iter) {
+      if (topLevelIterators.isEmpty())
+        return iter;
+      ArrayList<SortedKeyValueIterator<Key,Value>> allIters = new ArrayList<SortedKeyValueIterator<Key,Value>>(topLevelIterators);
+      allIters.add(iter);
+      return new MultiIterator(allIters, false);
+    }
+  }
+
+  private SortedKeyValueIterator<Key,Value> iter;
+  private Range range;
+  private KeyExtent currentExtent;
+  private Connector conn;
+  private String tableId;
+  private Authorizations authorizations;
+  private Instance instance;
+  private ScannerOptions options;
+  private ArrayList<SortedKeyValueIterator<Key,Value>> readers;
+  private AccumuloConfiguration config;
+
+  public OfflineIterator(ScannerOptions options, Instance instance, Credentials credentials, Authorizations authorizations, Text table, Range range) {
+    this.options = new ScannerOptions(options);
+    this.instance = instance;
+    this.range = range;
+
+    if (this.options.fetchedColumns.size() > 0) {
+      this.range = range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
+    }
+
+    this.tableId = table.toString();
+    this.authorizations = authorizations;
+    this.readers = new ArrayList<SortedKeyValueIterator<Key,Value>>();
+
+    try {
+      conn = instance.getConnector(credentials.getPrincipal(), credentials.getToken());
+      config = new ConfigurationCopy(conn.instanceOperations().getSiteConfiguration());
+      nextTablet();
+
+      while (iter != null && !iter.hasTop())
+        nextTablet();
+
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public boolean hasNext() {
+    return iter != null && iter.hasTop();
+  }
+
+  @Override
+  public Entry<Key,Value> next() {
+    try {
+      byte[] v = iter.getTopValue().get();
+      // copy just like tablet server does, do this before calling next
+      KeyValue ret = new KeyValue(new Key(iter.getTopKey()), Arrays.copyOf(v, v.length));
+
+      iter.next();
+
+      while (iter != null && !iter.hasTop())
+        nextTablet();
+
+      return ret;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void nextTablet() throws TableNotFoundException, AccumuloException, IOException {
+
+    Range nextRange = null;
+
+    if (currentExtent == null) {
+      Text startRow;
+
+      if (range.getStartKey() != null)
+        startRow = range.getStartKey().getRow();
+      else
+        startRow = new Text();
+
+      nextRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
+    } else {
+
+      if (currentExtent.getEndRow() == null) {
+        iter = null;
+        return;
+      }
+
+      if (range.afterEndKey(new Key(currentExtent.getEndRow()).followingKey(PartialKey.ROW))) {
+        iter = null;
+        return;
+      }
+
+      nextRange = new Range(currentExtent.getMetadataEntry(), false, null, false);
+    }
+
+    List<String> relFiles = new ArrayList<String>();
+
+    Pair<KeyExtent,String> eloc = getTabletFiles(nextRange, relFiles);
+
+    while (eloc.getSecond() != null) {
+      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
+        Tables.clearCache(instance);
+        if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
+          throw new AccumuloException("Table is online " + tableId + " cannot scan tablet in offline mode " + eloc.getFirst());
+        }
+      }
+
+      UtilWaitThread.sleep(250);
+
+      eloc = getTabletFiles(nextRange, relFiles);
+    }
+
+    KeyExtent extent = eloc.getFirst();
+
+    if (!extent.getTableId().toString().equals(tableId)) {
+      throw new AccumuloException(" did not find tablets for table " + tableId + " " + extent);
+    }
+
+    if (currentExtent != null && !extent.isPreviousExtent(currentExtent))
+      throw new AccumuloException(" " + currentExtent + " is not previous extent " + extent);
+
+    // Old property is only used to resolve relative paths into absolute paths. For systems upgraded
+    // with relative paths, it's assumed that correct instance.dfs.{uri,dir} is still correct in the configuration
+    @SuppressWarnings("deprecation")
+    String tablesDir = config.get(Property.INSTANCE_DFS_DIR) + Constants.HDFS_TABLES_DIR;
+
+    List<String> absFiles = new ArrayList<String>();
+    for (String relPath : relFiles) {
+      if (relPath.contains(":")) {
+        absFiles.add(relPath);
+      } else {
+        // handle old-style relative paths
+        if (relPath.startsWith("..")) {
+          absFiles.add(tablesDir + relPath.substring(2));
+        } else {
+          absFiles.add(tablesDir + "/" + tableId + relPath);
+        }
+      }
+    }
+
+    iter = createIterator(extent, absFiles);
+    iter.seek(range, LocalityGroupUtil.families(options.fetchedColumns), options.fetchedColumns.size() == 0 ? false : true);
+    currentExtent = extent;
+
+  }
+
+  private Pair<KeyExtent,String> getTabletFiles(Range nextRange, List<String> relFiles) throws TableNotFoundException {
+    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    scanner.setBatchSize(100);
+    scanner.setRange(nextRange);
+
+    RowIterator rowIter = new RowIterator(scanner);
+    Iterator<Entry<Key,Value>> row = rowIter.next();
+
+    KeyExtent extent = null;
+    String location = null;
+
+    while (row.hasNext()) {
+      Entry<Key,Value> entry = row.next();
+      Key key = entry.getKey();
+
+      if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+        relFiles.add(key.getColumnQualifier().toString());
+      }
+
+      if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)
+          || key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
+        location = entry.getValue().toString();
+      }
+
+      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
+        extent = new KeyExtent(key.getRow(), entry.getValue());
+      }
+
+    }
+    return new Pair<KeyExtent,String>(extent, location);
+  }
+
+  private SortedKeyValueIterator<Key,Value> createIterator(KeyExtent extent, List<String> absFiles) throws TableNotFoundException, AccumuloException,
+      IOException {
+
+    // TODO share code w/ tablet - ACCUMULO-1303
+    AccumuloConfiguration acuTableConf = AccumuloConfiguration.getTableConfiguration(conn, tableId);
+
+    Configuration conf = CachedConfiguration.getInstance();
+
+    for (SortedKeyValueIterator<Key,Value> reader : readers) {
+      ((FileSKVIterator) reader).close();
+    }
+
+    readers.clear();
+
+    // TODO need to close files - ACCUMULO-1303
+    for (String file : absFiles) {
+      FileSystem fs = VolumeConfiguration.getVolume(file, conf, config).getFileSystem();
+      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, conf, acuTableConf, null, null);
+      readers.add(reader);
+    }
+
+    MultiIterator multiIter = new MultiIterator(readers, extent);
+
+    OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations);
+
+    DeletingIterator delIter = new DeletingIterator(multiIter, false);
+
+    ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
+
+    ColumnQualifierFilter colFilter = new ColumnQualifierFilter(cfsi, new HashSet<Column>(options.fetchedColumns));
+
+    byte[] defaultSecurityLabel;
+
+    ColumnVisibility cv = new ColumnVisibility(acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
+    defaultSecurityLabel = cv.getExpression();
+
+    VisibilityFilter visFilter = new VisibilityFilter(colFilter, authorizations, defaultSecurityLabel);
+
+    return iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(IteratorScope.scan, visFilter, extent, acuTableConf, options.serverSideIteratorList,
+        options.serverSideIteratorOptions, iterEnv, false));
+  }
+
+  @Override
+  public void remove() {
+    throw new UnsupportedOperationException();
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
index 2f31319..427a7cc 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
@@ -18,332 +18,18 @@ package org.apache.accumulo.core.client.impl;
 
 import static com.google.common.base.Preconditions.checkArgument;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.RowIterator;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Column;
 import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyValue;
-import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVIterator;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.IteratorUtil;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
-import org.apache.accumulo.core.iterators.system.ColumnQualifierFilter;
-import org.apache.accumulo.core.iterators.system.DeletingIterator;
-import org.apache.accumulo.core.iterators.system.MultiIterator;
-import org.apache.accumulo.core.iterators.system.VisibilityFilter;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.LocalityGroupUtil;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.volume.VolumeConfiguration;
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
 
-class OfflineIterator implements Iterator<Entry<Key,Value>> {
-
-  static class OfflineIteratorEnvironment implements IteratorEnvironment {
-
-    private final Authorizations authorizations;
-
-    public OfflineIteratorEnvironment(Authorizations auths) {
-      this.authorizations = auths;
-    }
-
-    @Override
-    public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName) throws IOException {
-      throw new NotImplementedException();
-    }
-
-    @Override
-    public AccumuloConfiguration getConfig() {
-      return AccumuloConfiguration.getDefaultConfiguration();
-    }
-
-    @Override
-    public IteratorScope getIteratorScope() {
-      return IteratorScope.scan;
-    }
-
-    @Override
-    public boolean isFullMajorCompaction() {
-      return false;
-    }
-
-    private ArrayList<SortedKeyValueIterator<Key,Value>> topLevelIterators = new ArrayList<SortedKeyValueIterator<Key,Value>>();
-
-    @Override
-    public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
-      topLevelIterators.add(iter);
-    }
-
-    @Override
-    public Authorizations getAuthorizations() {
-      return authorizations;
-    }
-
-    SortedKeyValueIterator<Key,Value> getTopLevelIterator(SortedKeyValueIterator<Key,Value> iter) {
-      if (topLevelIterators.isEmpty())
-        return iter;
-      ArrayList<SortedKeyValueIterator<Key,Value>> allIters = new ArrayList<SortedKeyValueIterator<Key,Value>>(topLevelIterators);
-      allIters.add(iter);
-      return new MultiIterator(allIters, false);
-    }
-  }
-
-  private SortedKeyValueIterator<Key,Value> iter;
-  private Range range;
-  private KeyExtent currentExtent;
-  private Connector conn;
-  private String tableId;
-  private Authorizations authorizations;
-  private Instance instance;
-  private ScannerOptions options;
-  private ArrayList<SortedKeyValueIterator<Key,Value>> readers;
-  private AccumuloConfiguration config;
-
-  public OfflineIterator(ScannerOptions options, Instance instance, Credentials credentials, Authorizations authorizations, Text table, Range range) {
-    this.options = new ScannerOptions(options);
-    this.instance = instance;
-    this.range = range;
-
-    if (this.options.fetchedColumns.size() > 0) {
-      this.range = range.bound(this.options.fetchedColumns.first(), this.options.fetchedColumns.last());
-    }
-
-    this.tableId = table.toString();
-    this.authorizations = authorizations;
-    this.readers = new ArrayList<SortedKeyValueIterator<Key,Value>>();
-
-    try {
-      conn = instance.getConnector(credentials.getPrincipal(), credentials.getToken());
-      config = new ConfigurationCopy(conn.instanceOperations().getSiteConfiguration());
-      nextTablet();
-
-      while (iter != null && !iter.hasTop())
-        nextTablet();
-
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  @Override
-  public boolean hasNext() {
-    return iter != null && iter.hasTop();
-  }
-
-  @Override
-  public Entry<Key,Value> next() {
-    try {
-      byte[] v = iter.getTopValue().get();
-      // copy just like tablet server does, do this before calling next
-      KeyValue ret = new KeyValue(new Key(iter.getTopKey()), Arrays.copyOf(v, v.length));
-
-      iter.next();
-
-      while (iter != null && !iter.hasTop())
-        nextTablet();
-
-      return ret;
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void nextTablet() throws TableNotFoundException, AccumuloException, IOException {
-
-    Range nextRange = null;
-
-    if (currentExtent == null) {
-      Text startRow;
-
-      if (range.getStartKey() != null)
-        startRow = range.getStartKey().getRow();
-      else
-        startRow = new Text();
-
-      nextRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
-    } else {
-
-      if (currentExtent.getEndRow() == null) {
-        iter = null;
-        return;
-      }
-
-      if (range.afterEndKey(new Key(currentExtent.getEndRow()).followingKey(PartialKey.ROW))) {
-        iter = null;
-        return;
-      }
-
-      nextRange = new Range(currentExtent.getMetadataEntry(), false, null, false);
-    }
-
-    List<String> relFiles = new ArrayList<String>();
-
-    Pair<KeyExtent,String> eloc = getTabletFiles(nextRange, relFiles);
-
-    while (eloc.getSecond() != null) {
-      if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
-        Tables.clearCache(instance);
-        if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
-          throw new AccumuloException("Table is online " + tableId + " cannot scan tablet in offline mode " + eloc.getFirst());
-        }
-      }
-
-      UtilWaitThread.sleep(250);
-
-      eloc = getTabletFiles(nextRange, relFiles);
-    }
-
-    KeyExtent extent = eloc.getFirst();
-
-    if (!extent.getTableId().toString().equals(tableId)) {
-      throw new AccumuloException(" did not find tablets for table " + tableId + " " + extent);
-    }
-
-    if (currentExtent != null && !extent.isPreviousExtent(currentExtent))
-      throw new AccumuloException(" " + currentExtent + " is not previous extent " + extent);
-
-    // Old property is only used to resolve relative paths into absolute paths. For systems upgraded
-    // with relative paths, it's assumed that correct instance.dfs.{uri,dir} is still correct in the configuration
-    @SuppressWarnings("deprecation")
-    String tablesDir = config.get(Property.INSTANCE_DFS_DIR) + Constants.HDFS_TABLES_DIR;
-
-    List<String> absFiles = new ArrayList<String>();
-    for (String relPath : relFiles) {
-      if (relPath.contains(":")) {
-        absFiles.add(relPath);
-      } else {
-        // handle old-style relative paths
-        if (relPath.startsWith("..")) {
-          absFiles.add(tablesDir + relPath.substring(2));
-        } else {
-          absFiles.add(tablesDir + "/" + tableId + relPath);
-        }
-      }
-    }
-
-    iter = createIterator(extent, absFiles);
-    iter.seek(range, LocalityGroupUtil.families(options.fetchedColumns), options.fetchedColumns.size() == 0 ? false : true);
-    currentExtent = extent;
-
-  }
-
-  private Pair<KeyExtent,String> getTabletFiles(Range nextRange, List<String> relFiles) throws TableNotFoundException {
-    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    scanner.setBatchSize(100);
-    scanner.setRange(nextRange);
-
-    RowIterator rowIter = new RowIterator(scanner);
-    Iterator<Entry<Key,Value>> row = rowIter.next();
-
-    KeyExtent extent = null;
-    String location = null;
-
-    while (row.hasNext()) {
-      Entry<Key,Value> entry = row.next();
-      Key key = entry.getKey();
-
-      if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-        relFiles.add(key.getColumnQualifier().toString());
-      }
-
-      if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)
-          || key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
-        location = entry.getValue().toString();
-      }
-
-      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
-        extent = new KeyExtent(key.getRow(), entry.getValue());
-      }
-
-    }
-    return new Pair<KeyExtent,String>(extent, location);
-  }
-
-  private SortedKeyValueIterator<Key,Value> createIterator(KeyExtent extent, List<String> absFiles) throws TableNotFoundException, AccumuloException,
-      IOException {
-
-    // TODO share code w/ tablet - ACCUMULO-1303
-    AccumuloConfiguration acuTableConf = AccumuloConfiguration.getTableConfiguration(conn, tableId);
-
-    Configuration conf = CachedConfiguration.getInstance();
-
-    for (SortedKeyValueIterator<Key,Value> reader : readers) {
-      ((FileSKVIterator) reader).close();
-    }
-
-    readers.clear();
-
-    // TODO need to close files - ACCUMULO-1303
-    for (String file : absFiles) {
-      FileSystem fs = VolumeConfiguration.getVolume(file, conf, config).getFileSystem();
-      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, conf, acuTableConf, null, null);
-      readers.add(reader);
-    }
-
-    MultiIterator multiIter = new MultiIterator(readers, extent);
-
-    OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations);
-
-    DeletingIterator delIter = new DeletingIterator(multiIter, false);
-
-    ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
-
-    ColumnQualifierFilter colFilter = new ColumnQualifierFilter(cfsi, new HashSet<Column>(options.fetchedColumns));
-
-    byte[] defaultSecurityLabel;
-
-    ColumnVisibility cv = new ColumnVisibility(acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
-    defaultSecurityLabel = cv.getExpression();
-
-    VisibilityFilter visFilter = new VisibilityFilter(colFilter, authorizations, defaultSecurityLabel);
-
-    return iterEnv.getTopLevelIterator(IteratorUtil.loadIterators(IteratorScope.scan, visFilter, extent, acuTableConf, options.serverSideIteratorList,
-        options.serverSideIteratorOptions, iterEnv, false));
-  }
-
-  @Override
-  public void remove() {
-    throw new UnsupportedOperationException();
-  }
-
-}
-
-/**
- *
- */
 public class OfflineScanner extends ScannerOptions implements Scanner {
 
   private int batchSize;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java b/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
index a45a692..43f8c0f 100644
--- a/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/CompactionSettings.java
@@ -18,48 +18,6 @@
 package org.apache.accumulo.core.compaction;
 
 import java.util.Map;
-import java.util.regex.Pattern;
-
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-
-import com.google.common.base.Preconditions;
-
-interface Type {
-  String convert(String str);
-}
-
-class SizeType implements Type {
-  @Override
-  public String convert(String str) {
-    long size = AccumuloConfiguration.getMemoryInBytes(str);
-    Preconditions.checkArgument(size > 0);
-    return Long.toString(size);
-  }
-}
-
-class PatternType implements Type {
-  @Override
-  public String convert(String str) {
-    // ensure it compiles
-    Pattern.compile(str);
-    return str;
-  }
-}
-
-class UIntType implements Type {
-  @Override
-  public String convert(String str) {
-    Preconditions.checkArgument(Integer.parseInt(str) > 0);
-    return str;
-  }
-}
-
-class StringType implements Type {
-  @Override
-  public String convert(String str) {
-    return str;
-  }
-}
 
 public enum CompactionSettings {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java b/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java
new file mode 100644
index 0000000..c52dcb4
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/PatternType.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+import java.util.regex.Pattern;
+
+class PatternType implements Type {
+  @Override
+  public String convert(String str) {
+    // ensure it compiles
+    Pattern.compile(str);
+    return str;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java b/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java
new file mode 100644
index 0000000..c2af401
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/SizeType.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+
+import com.google.common.base.Preconditions;
+
+class SizeType implements Type {
+  @Override
+  public String convert(String str) {
+    long size = AccumuloConfiguration.getMemoryInBytes(str);
+    Preconditions.checkArgument(size > 0);
+    return Long.toString(size);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java b/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java
new file mode 100644
index 0000000..7098a5c
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/StringType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+class StringType implements Type {
+  @Override
+  public String convert(String str) {
+    return str;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/Type.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/Type.java b/core/src/main/java/org/apache/accumulo/core/compaction/Type.java
new file mode 100644
index 0000000..d8f81a6
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/Type.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+interface Type {
+  String convert(String str);
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java b/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java
new file mode 100644
index 0000000..c8880fc
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/compaction/UIntType.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.compaction;
+
+import com.google.common.base.Preconditions;
+
+class UIntType implements Type {
+  @Override
+  public String convert(String str) {
+    Preconditions.checkArgument(Integer.parseInt(str) > 0);
+    return str;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
new file mode 100644
index 0000000..128a931
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.file;
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.file.blockfile.cache.BlockCache;
+import org.apache.accumulo.core.file.map.MapFileOperations;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.file.rfile.RFileOperations;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+class DispatchingFileFactory extends FileOperations {
+
+  private FileOperations findFileFactory(String file) {
+
+    Path p = new Path(file);
+    String name = p.getName();
+
+    if (name.startsWith(Constants.MAPFILE_EXTENSION + "_")) {
+      return new MapFileOperations();
+    }
+    String[] sp = name.split("\\.");
+
+    if (sp.length < 2) {
+      throw new IllegalArgumentException("File name " + name + " has no extension");
+    }
+
+    String extension = sp[sp.length - 1];
+
+    if (extension.equals(Constants.MAPFILE_EXTENSION) || extension.equals(Constants.MAPFILE_EXTENSION + "_tmp")) {
+      return new MapFileOperations();
+    } else if (extension.equals(RFile.EXTENSION) || extension.equals(RFile.EXTENSION + "_tmp")) {
+      return new RFileOperations();
+    } else {
+      throw new IllegalArgumentException("File type " + extension + " not supported");
+    }
+  }
+
+  @Override
+  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    return findFileFactory(file).openIndex(file, fs, conf, acuconf, null, null);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, null, null);
+    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
+      return new BloomFilterLayer.Reader(iter, acuconf);
+    }
+    return iter;
+  }
+
+  @Override
+  public FileSKVWriter openWriter(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    FileSKVWriter writer = findFileFactory(file).openWriter(file, fs, conf, acuconf);
+    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
+      return new BloomFilterLayer.Writer(writer, acuconf);
+    }
+    return writer;
+  }
+
+  @Override
+  public long getFileSize(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
+    return findFileFactory(file).getFileSize(file, fs, conf, acuconf);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
+      AccumuloConfiguration tableConf) throws IOException {
+    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, null, null);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
+      AccumuloConfiguration tableConf, BlockCache dataCache, BlockCache indexCache) throws IOException {
+
+    if (!tableConf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
+      indexCache = null;
+    if (!tableConf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
+      dataCache = null;
+
+    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, dataCache, indexCache);
+  }
+
+  @Override
+  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf,
+      BlockCache dataCache, BlockCache indexCache) throws IOException {
+
+    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
+      indexCache = null;
+    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
+      dataCache = null;
+
+    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, dataCache, indexCache);
+    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
+      return new BloomFilterLayer.Reader(iter, acuconf);
+    }
+    return iter;
+  }
+
+  @Override
+  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf, BlockCache dCache, BlockCache iCache)
+      throws IOException {
+
+    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
+      iCache = null;
+    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
+      dCache = null;
+
+    return findFileFactory(file).openIndex(file, fs, conf, acuconf, dCache, iCache);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 78d0407..3798453 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -27,115 +27,9 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.file.blockfile.cache.BlockCache;
-import org.apache.accumulo.core.file.map.MapFileOperations;
 import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-class DispatchingFileFactory extends FileOperations {
-
-  private FileOperations findFileFactory(String file) {
-
-    Path p = new Path(file);
-    String name = p.getName();
-
-    if (name.startsWith(Constants.MAPFILE_EXTENSION + "_")) {
-      return new MapFileOperations();
-    }
-    String[] sp = name.split("\\.");
-
-    if (sp.length < 2) {
-      throw new IllegalArgumentException("File name " + name + " has no extension");
-    }
-
-    String extension = sp[sp.length - 1];
-
-    if (extension.equals(Constants.MAPFILE_EXTENSION) || extension.equals(Constants.MAPFILE_EXTENSION + "_tmp")) {
-      return new MapFileOperations();
-    } else if (extension.equals(RFile.EXTENSION) || extension.equals(RFile.EXTENSION + "_tmp")) {
-      return new RFileOperations();
-    } else {
-      throw new IllegalArgumentException("File type " + extension + " not supported");
-    }
-  }
-
-  @Override
-  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    return findFileFactory(file).openIndex(file, fs, conf, acuconf, null, null);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, null, null);
-    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
-      return new BloomFilterLayer.Reader(iter, acuconf);
-    }
-    return iter;
-  }
-
-  @Override
-  public FileSKVWriter openWriter(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    FileSKVWriter writer = findFileFactory(file).openWriter(file, fs, conf, acuconf);
-    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
-      return new BloomFilterLayer.Writer(writer, acuconf);
-    }
-    return writer;
-  }
-
-  @Override
-  public long getFileSize(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
-    return findFileFactory(file).getFileSize(file, fs, conf, acuconf);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
-      AccumuloConfiguration tableConf) throws IOException {
-    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, null, null);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
-      AccumuloConfiguration tableConf, BlockCache dataCache, BlockCache indexCache) throws IOException {
-
-    if (!tableConf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
-      indexCache = null;
-    if (!tableConf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
-      dataCache = null;
-
-    return findFileFactory(file).openReader(file, range, columnFamilies, inclusive, fs, conf, tableConf, dataCache, indexCache);
-  }
-
-  @Override
-  public FileSKVIterator openReader(String file, boolean seekToBeginning, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf,
-      BlockCache dataCache, BlockCache indexCache) throws IOException {
-
-    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
-      indexCache = null;
-    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
-      dataCache = null;
-
-    FileSKVIterator iter = findFileFactory(file).openReader(file, seekToBeginning, fs, conf, acuconf, dataCache, indexCache);
-    if (acuconf.getBoolean(Property.TABLE_BLOOM_ENABLED)) {
-      return new BloomFilterLayer.Reader(iter, acuconf);
-    }
-    return iter;
-  }
-
-  @Override
-  public FileSKVIterator openIndex(String file, FileSystem fs, Configuration conf, AccumuloConfiguration acuconf, BlockCache dCache, BlockCache iCache)
-      throws IOException {
-
-    if (!acuconf.getBoolean(Property.TABLE_INDEXCACHE_ENABLED))
-      iCache = null;
-    if (!acuconf.getBoolean(Property.TABLE_BLOCKCACHE_ENABLED))
-      dCache = null;
-
-    return findFileFactory(file).openIndex(file, fs, conf, acuconf, dCache, iCache);
-  }
-
-}
 
 public abstract class FileOperations {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java b/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
index f0fdcca..65df5c9 100644
--- a/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
+++ b/core/src/test/java/org/apache/accumulo/core/cli/TestClientOpts.java
@@ -263,5 +263,10 @@ public class TestClientOpts {
     public boolean equals(Object o) {
       return o instanceof EmptyToken;
     }
+
+    @Override
+    public int hashCode() {
+      return 0;
+    }
   }
 }
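
Note that returning a constant here is correct rather than a shortcut:
equals makes every EmptyToken equal to every other, so the hashCode
contract forces all instances to share a single hash value, and any
constant satisfies it.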

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
new file mode 100644
index 0000000..873f886
--- /dev/null
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.simple.client;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Arrays;
+import java.util.HashMap;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Internal class used to verify validity of data read.
+ */
+class CountingVerifyingReceiver {
+  private static final Logger log = LoggerFactory.getLogger(CountingVerifyingReceiver.class);
+
+  long count = 0;
+  int expectedValueSize = 0;
+  HashMap<Text,Boolean> expectedRows;
+
+  CountingVerifyingReceiver(HashMap<Text,Boolean> expectedRows, int expectedValueSize) {
+    this.expectedRows = expectedRows;
+    this.expectedValueSize = expectedValueSize;
+  }
+
+  public void receive(Key key, Value value) {
+
+    String row = key.getRow().toString();
+    long rowid = Integer.parseInt(row.split("_")[1]);
+
+    byte expectedValue[] = RandomBatchWriter.createValue(rowid, expectedValueSize);
+
+    if (!Arrays.equals(expectedValue, value.get())) {
+      log.error("Got unexpected value for " + key + " expected : " + new String(expectedValue, UTF_8) + " got : " + new String(value.get(), UTF_8));
+    }
+
+    if (!expectedRows.containsKey(key.getRow())) {
+      log.error("Got unexpected key " + key);
+    } else {
+      expectedRows.put(key.getRow(), true);
+    }
+
+    count++;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
index 6f8b485..a43b97d 100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
@@ -16,10 +16,8 @@
  */
 package org.apache.accumulo.examples.simple.client;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.accumulo.examples.simple.client.RandomBatchWriter.abs;
 
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map.Entry;
@@ -43,42 +41,6 @@ import org.slf4j.LoggerFactory;
 import com.beust.jcommander.Parameter;
 
 /**
- * Internal class used to verify validity of data read.
- */
-class CountingVerifyingReceiver {
-  private static final Logger log = LoggerFactory.getLogger(CountingVerifyingReceiver.class);
-
-  long count = 0;
-  int expectedValueSize = 0;
-  HashMap<Text,Boolean> expectedRows;
-
-  CountingVerifyingReceiver(HashMap<Text,Boolean> expectedRows, int expectedValueSize) {
-    this.expectedRows = expectedRows;
-    this.expectedValueSize = expectedValueSize;
-  }
-
-  public void receive(Key key, Value value) {
-
-    String row = key.getRow().toString();
-    long rowid = Integer.parseInt(row.split("_")[1]);
-
-    byte expectedValue[] = RandomBatchWriter.createValue(rowid, expectedValueSize);
-
-    if (!Arrays.equals(expectedValue, value.get())) {
-      log.error("Got unexpected value for " + key + " expected : " + new String(expectedValue, UTF_8) + " got : " + new String(value.get(), UTF_8));
-    }
-
-    if (!expectedRows.containsKey(key.getRow())) {
-      log.error("Got unexpected key " + key);
-    } else {
-      expectedRows.put(key.getRow(), true);
-    }
-
-    count++;
-  }
-}
-
-/**
  * Simple example for reading random batches of data from Accumulo. See docs/examples/README.batch for instructions.
  */
 public class RandomBatchScanner {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0bcc689..f680f84 100644
--- a/pom.xml
+++ b/pom.xml
@@ -946,6 +946,7 @@
                 <property name="eachLine" value="true" />
               </module>
               <module name="TreeWalker">
+                <module name="OneTopLevelClass" />
                 <module name="RegexpSinglelineJava">
                   <property name="format" value="\s+$" />
                   <property name="message" value="Line has trailing whitespace." />

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
index 7f83988..031a80c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
@@ -16,71 +16,34 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Map.Entry;
-import java.util.Set;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
-import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IsolatedScanner;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ServerClient;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.ClientService;
-import org.apache.accumulo.core.client.impl.thrift.ClientService.Client;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
-import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.Text;
-import org.apache.htrace.wrappers.TraceExecutorService;
-import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -302,329 +265,3 @@ public class BulkImport extends MasterRepo {
     Utils.getReadLock(tableId, tid).unlock();
   }
 }
-
-class CleanUpBulkImport extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private static final Logger log = LoggerFactory.getLogger(CleanUpBulkImport.class);
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String error;
-
-  public CleanUpBulkImport(String tableId, String source, String bulk, String error) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.error = error;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    log.debug("removing the bulk processing flag file in " + bulk);
-    Path bulkDir = new Path(bulk);
-    MetadataTableUtil.removeBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
-    MetadataTableUtil.addDeleteEntry(master, tableId, bulkDir.toString());
-    log.debug("removing the metadata table markers for loaded files");
-    Connector conn = master.getConnector();
-    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
-    log.debug("releasing HDFS reservations for " + source + " and " + error);
-    Utils.unreserveHdfsDirectory(source, tid);
-    Utils.unreserveHdfsDirectory(error, tid);
-    Utils.getReadLock(tableId, tid).unlock();
-    log.debug("completing bulk import transaction " + tid);
-    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
-    return null;
-  }
-}
-
-class CompleteBulkImport extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String error;
-
-  public CompleteBulkImport(String tableId, String source, String bulk, String error) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.error = error;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
-    return new CopyFailed(tableId, source, bulk, error);
-  }
-}
-
-class CopyFailed extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String error;
-
-  public CopyFailed(String tableId, String source, String bulk, String error) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.error = error;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    Set<TServerInstance> finished = new HashSet<TServerInstance>();
-    Set<TServerInstance> running = master.onlineTabletServers();
-    for (TServerInstance server : running) {
-      try {
-        TServerConnection client = master.getConnection(server);
-        if (client != null && !client.isActive(tid))
-          finished.add(server);
-      } catch (TException ex) {
-        log.info("Ignoring error trying to check on tid " + tid + " from server " + server + ": " + ex);
-      }
-    }
-    if (finished.containsAll(running))
-      return 0;
-    return 500;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    // This needs to execute after the arbiter is stopped
-
-    VolumeManager fs = master.getFileSystem();
-
-    if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
-      return new CleanUpBulkImport(tableId, source, bulk, error);
-
-    HashMap<FileRef,String> failures = new HashMap<FileRef,String>();
-    HashMap<FileRef,String> loadedFailures = new HashMap<FileRef,String>();
-
-    try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(error, BulkImport.FAILURES_TXT)), UTF_8))) {
-      String line = null;
-      while ((line = in.readLine()) != null) {
-        Path path = new Path(line);
-        if (!fs.exists(new Path(error, path.getName())))
-          failures.put(new FileRef(line, path), line);
-      }
-    }
-
-    /*
-     * I thought I could move files that have no file references in the table. However its possible a clone references a file. Therefore only move files that
-     * have no loaded markers.
-     */
-
-    // determine which failed files were loaded
-    Connector conn = master.getConnector();
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
-    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
-
-    for (Entry<Key,Value> entry : mscanner) {
-      if (Long.parseLong(entry.getValue().toString()) == tid) {
-        FileRef loadedFile = new FileRef(fs, entry.getKey());
-        String absPath = failures.remove(loadedFile);
-        if (absPath != null) {
-          loadedFailures.put(loadedFile, absPath);
-        }
-      }
-    }
-
-    // move failed files that were not loaded
-    for (String failure : failures.values()) {
-      Path orig = new Path(failure);
-      Path dest = new Path(error, orig.getName());
-      fs.rename(orig, dest);
-      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
-    }
-
-    if (loadedFailures.size() > 0) {
-      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ,
-          master.getConfiguration());
-
-      HashSet<String> workIds = new HashSet<String>();
-
-      for (String failure : loadedFailures.values()) {
-        Path orig = new Path(failure);
-        Path dest = new Path(error, orig.getName());
-
-        if (fs.exists(dest))
-          continue;
-
-        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(UTF_8));
-        workIds.add(orig.getName());
-        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
-      }
-
-      bifCopyQueue.waitUntilDone(workIds);
-    }
-
-    fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
-    return new CleanUpBulkImport(tableId, source, bulk, error);
-  }
-
-}
-
-class LoadFiles extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private static ExecutorService threadPool = null;
-  private static final Logger log = LoggerFactory.getLogger(BulkImport.class);
-
-  private String tableId;
-  private String source;
-  private String bulk;
-  private String errorDir;
-  private boolean setTime;
-
-  public LoadFiles(String tableId, String source, String bulk, String errorDir, boolean setTime) {
-    this.tableId = tableId;
-    this.source = source;
-    this.bulk = bulk;
-    this.errorDir = errorDir;
-    this.setTime = setTime;
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    if (master.onlineTabletServers().size() == 0)
-      return 500;
-    return 0;
-  }
-
-  private static synchronized ExecutorService getThreadPool(Master master) {
-    if (threadPool == null) {
-      int threadPoolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
-      ThreadPoolExecutor pool = new SimpleThreadPool(threadPoolSize, "bulk import");
-      pool.allowCoreThreadTimeOut(true);
-      threadPool = new TraceExecutorService(pool);
-    }
-    return threadPool;
-  }
-
-  @Override
-  public Repo<Master> call(final long tid, final Master master) throws Exception {
-    ExecutorService executor = getThreadPool(master);
-    final AccumuloConfiguration conf = master.getConfiguration();
-    VolumeManager fs = master.getFileSystem();
-    List<FileStatus> files = new ArrayList<FileStatus>();
-    for (FileStatus entry : fs.listStatus(new Path(bulk))) {
-      files.add(entry);
-    }
-    log.debug("tid " + tid + " importing " + files.size() + " files");
-
-    Path writable = new Path(this.errorDir, ".iswritable");
-    if (!fs.createNewFile(writable)) {
-      // Maybe this is a re-try... clear the flag and try again
-      fs.delete(writable);
-      if (!fs.createNewFile(writable))
-        throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
-            "Unable to write to " + this.errorDir);
-    }
-    fs.delete(writable);
-
-    final Set<String> filesToLoad = Collections.synchronizedSet(new HashSet<String>());
-    for (FileStatus f : files)
-      filesToLoad.add(f.getPath().toString());
-
-    final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
-    for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
-      List<Future<List<String>>> results = new ArrayList<Future<List<String>>>();
-
-      if (master.onlineTabletServers().size() == 0)
-        log.warn("There are no tablet server to process bulk import, waiting (tid = " + tid + ")");
-
-      while (master.onlineTabletServers().size() == 0) {
-        UtilWaitThread.sleep(500);
-      }
-
-      // Use the threadpool to assign files one-at-a-time to the server
-      final List<String> loaded = Collections.synchronizedList(new ArrayList<String>());
-      for (final String file : filesToLoad) {
-        results.add(executor.submit(new Callable<List<String>>() {
-          @Override
-          public List<String> call() {
-            List<String> failures = new ArrayList<String>();
-            ClientService.Client client = null;
-            String server = null;
-            try {
-              // get a connection to a random tablet server, do not prefer cached connections because
-              // this is running on the master and there are lots of connections to tablet servers
-              // serving the metadata tablets
-              long timeInMillis = master.getConfiguration().getTimeInMillis(Property.MASTER_BULK_TIMEOUT);
-              Pair<String,Client> pair = ServerClient.getConnection(master, false, timeInMillis);
-              client = pair.getSecond();
-              server = pair.getFirst();
-              List<String> attempt = Collections.singletonList(file);
-              log.debug("Asking " + pair.getFirst() + " to bulk import " + file);
-              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId, attempt, errorDir, setTime);
-              if (fail.isEmpty()) {
-                loaded.add(file);
-              } else {
-                failures.addAll(fail);
-              }
-            } catch (Exception ex) {
-              log.error("rpc failed server:" + server + ", tid:" + tid + " " + ex);
-            } finally {
-              ServerClient.close(client);
-            }
-            return failures;
-          }
-        }));
-      }
-      Set<String> failures = new HashSet<String>();
-      for (Future<List<String>> f : results)
-        failures.addAll(f.get());
-      filesToLoad.removeAll(loaded);
-      if (filesToLoad.size() > 0) {
-        log.debug("tid " + tid + " attempt " + (attempt + 1) + " " + sampleList(filesToLoad, 10) + " failed");
-        UtilWaitThread.sleep(100);
-      }
-    }
-
-    FSDataOutputStream failFile = fs.create(new Path(errorDir, BulkImport.FAILURES_TXT), true);
-    BufferedWriter out = new BufferedWriter(new OutputStreamWriter(failFile, UTF_8));
-    try {
-      for (String f : filesToLoad) {
-        out.write(f);
-        out.write("\n");
-      }
-    } finally {
-      out.close();
-    }
-
-    // return the next step, which will perform cleanup
-    return new CompleteBulkImport(tableId, source, bulk, errorDir);
-  }
-
-  static String sampleList(Collection<?> potentiallyLongList, int max) {
-    StringBuffer result = new StringBuffer();
-    result.append("[");
-    int i = 0;
-    for (Object obj : potentiallyLongList) {
-      result.append(obj);
-      if (i >= max) {
-        result.append("...");
-        break;
-      } else {
-        result.append(", ");
-      }
-      i++;
-    }
-    if (i < max)
-      result.delete(result.length() - 2, result.length());
-    result.append("]");
-    return result.toString();
-  }
-
-}
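
The four classes removed above (CleanUpBulkImport, CompleteBulkImport, CopyFailed and LoadFiles) reappear as standalone files later in this commit. They all follow the same FATE contract that BulkImport itself uses: isReady() returns 0 when the step may run, or a back-off delay in milliseconds, and call() performs the step and returns the next Repo in the chain, or null to end the operation. A minimal sketch of that contract (a hypothetical step, not part of the commit):

    class ExampleStep extends MasterRepo {
      private static final long serialVersionUID = 1L;

      @Override
      public long isReady(long tid, Master master) throws Exception {
        // 0 means "run call() now"; a positive value means "retry after this many ms"
        return master.onlineTabletServers().size() == 0 ? 500 : 0;
      }

      @Override
      public Repo<Master> call(long tid, Master master) throws Exception {
        // do this step's work, then hand off to the next step (LoadFiles returns
        // new CompleteBulkImport, which returns new CopyFailed, and so on;
        // returning null ends the chain)
        return null;
      }
    }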

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
index 4f4b27e..e268f17 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
@@ -27,29 +27,6 @@ import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
-class FinishCancelCompaction extends MasterRepo {
-  private static final long serialVersionUID = 1L;
-  private String tableId;
-
-  public FinishCancelCompaction(String tableId) {
-    this.tableId = tableId;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-    Utils.getReadLock(tableId, tid).unlock();
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-
-  }
-}
-
-/**
- *
- */
 public class CancelCompactions extends MasterRepo {
 
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java
new file mode 100644
index 0000000..3e1aa33
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChooseDir.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.hadoop.fs.Path;
+
+import com.google.common.base.Optional;
+
+class ChooseDir extends MasterRepo {
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  ChooseDir(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add one here
+    tableInfo.dir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+        + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
+    return new CreateDir(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master master) throws Exception {
+
+  }
+}
\ No newline at end of file
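
Assuming the usual values of the constants involved (Constants.HDFS_TABLES_DIR = "/tables" and Constants.DEFAULT_TABLET_LOCATION = "/default_tablet"; neither appears in this diff), the directory ChooseDir composes looks roughly like:

    // chosen volume (from the volume chooser):  hdfs://namenode/accumulo
    // resulting tableInfo.dir:                  hdfs://namenode/accumulo/tables/<tableId>/default_tablet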


[2/9] accumulo git commit: ACCUMULO-3759 Fix Java 8 compiler warnings

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
index 05676e7..a1158f4 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
@@ -16,277 +16,12 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import java.io.IOException;
-import java.net.UnknownHostException;
-import java.util.Arrays;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.iterators.user.GrepIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.accumulo.server.master.state.TabletState;
-import org.apache.accumulo.server.problems.ProblemReports;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-class CleanUp extends MasterRepo {
-
-  final private static Logger log = LoggerFactory.getLogger(CleanUp.class);
-
-  private static final long serialVersionUID = 1L;
-
-  private String tableId, namespaceId;
-
-  private long creationTime;
-
-  private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
-    in.defaultReadObject();
-
-    /*
-     * handle the case where we start executing on a new machine where the current time is in the past relative to the previous machine
-     *
-     * if the new machine has time in the future, that will work ok w/ hasCycled
-     */
-    if (System.currentTimeMillis() < creationTime) {
-      creationTime = System.currentTimeMillis();
-    }
-
-  }
-
-  public CleanUp(String tableId, String namespaceId) {
-    this.tableId = tableId;
-    this.namespaceId = namespaceId;
-    creationTime = System.currentTimeMillis();
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-    if (!master.hasCycled(creationTime)) {
-      return 50;
-    }
-
-    boolean done = true;
-    Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    Scanner scanner = master.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    MetaDataTableScanner.configureScanner(scanner, master);
-    scanner.setRange(tableRange);
-
-    for (Entry<Key,Value> entry : scanner) {
-      TabletLocationState locationState = MetaDataTableScanner.createTabletLocationState(entry.getKey(), entry.getValue());
-      TabletState state = locationState.getState(master.onlineTabletServers());
-      if (state.equals(TabletState.ASSIGNED) || state.equals(TabletState.HOSTED)) {
-        log.debug("Still waiting for table to be deleted: " + tableId + " locationState: " + locationState);
-        done = false;
-        break;
-      }
-    }
-
-    if (!done)
-      return 50;
-
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    master.clearMigrations(tableId);
-
-    int refCount = 0;
-
-    try {
-      // look for other tables that references this table's files
-      Connector conn = master.getConnector();
-      BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
-      try {
-        Range allTables = MetadataSchema.TabletsSection.getRange();
-        Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
-        Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
-        Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
-        bs.setRanges(Arrays.asList(beforeTable, afterTable));
-        bs.fetchColumnFamily(DataFileColumnFamily.NAME);
-        IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
-        GrepIterator.setTerm(cfg, "/" + tableId + "/");
-        bs.addScanIterator(cfg);
-
-        for (Entry<Key,Value> entry : bs) {
-          if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
-            refCount++;
-          }
-        }
-      } finally {
-        bs.close();
-      }
-
-    } catch (Exception e) {
-      refCount = -1;
-      log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
-    }
-
-    // remove metadata table entries
-    try {
-      // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
-      // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
-      // are dropped and the operation completes, then the deletes will not be repeated.
-      MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
-    } catch (Exception e) {
-      log.error("error deleting " + tableId + " from metadata table", e);
-    }
-
-    // remove any problem reports the table may have
-    try {
-      ProblemReports.getInstance(master).deleteProblemReports(tableId);
-    } catch (Exception e) {
-      log.error("Failed to delete problem reports for table " + tableId, e);
-    }
-
-    if (refCount == 0) {
-      final AccumuloConfiguration conf = master.getConfiguration();
-      boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
-
-      // delete the map files
-      try {
-        VolumeManager fs = master.getFileSystem();
-        for (String dir : ServerConstants.getTablesDirs()) {
-          if (archiveFiles) {
-            archiveFile(fs, dir, tableId);
-          } else {
-            fs.deleteRecursively(new Path(dir, tableId));
-          }
-        }
-      } catch (IOException e) {
-        log.error("Unable to remove deleted table directory", e);
-      } catch (IllegalArgumentException exception) {
-        if (exception.getCause() instanceof UnknownHostException) {
-          /* Thrown if HDFS encounters a DNS problem in some edge cases */
-          log.error("Unable to remove deleted table directory", exception);
-        } else {
-          throw exception;
-        }
-      }
-    }
-
-    // remove table from zookeeper
-    try {
-      TableManager.getInstance().removeTable(tableId);
-      Tables.clearCache(master.getInstance());
-    } catch (Exception e) {
-      log.error("Failed to find table id in zookeeper", e);
-    }
-
-    // remove any permissions associated with this table
-    try {
-      AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
-    } catch (ThriftSecurityException e) {
-      log.error("{}", e.getMessage(), e);
-    }
-
-    Utils.unreserveTable(tableId, tid, true);
-    Utils.unreserveNamespace(namespaceId, tid, false);
-
-    LoggerFactory.getLogger(CleanUp.class).debug("Deleted table " + tableId);
-
-    return null;
-  }
-
-  protected void archiveFile(VolumeManager fs, String dir, String tableId) throws IOException {
-    Path tableDirectory = new Path(dir, tableId);
-    Volume v = fs.getVolumeByPath(tableDirectory);
-    String basePath = v.getBasePath();
-
-    // Path component of URI
-    String tableDirPath = tableDirectory.toUri().getPath();
-
-    // Just the suffix of the path (after the Volume's base path)
-    String tableDirSuffix = tableDirPath.substring(basePath.length());
-
-    // Remove a leading path separator char because Path will treat the "child" as an absolute path with it
-    if (Path.SEPARATOR_CHAR == tableDirSuffix.charAt(0)) {
-      if (tableDirSuffix.length() > 1) {
-        tableDirSuffix = tableDirSuffix.substring(1);
-      } else {
-        tableDirSuffix = "";
-      }
-    }
-
-    // Get the file archive directory on this volume
-    final Path fileArchiveDir = new Path(basePath, ServerConstants.FILE_ARCHIVE_DIR);
-
-    // Make sure it exists just to be safe
-    fs.mkdirs(fileArchiveDir);
-
-    // The destination to archive this table to
-    final Path destTableDir = new Path(fileArchiveDir, tableDirSuffix);
-
-    log.debug("Archiving " + tableDirectory + " to " + tableDirectory);
-
-    if (fs.exists(destTableDir)) {
-      merge(fs, tableDirectory, destTableDir);
-    } else {
-      fs.rename(tableDirectory, destTableDir);
-    }
-  }
-
-  protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
-    for (FileStatus child : fs.listStatus(src)) {
-      final String childName = child.getPath().getName();
-      final Path childInSrc = new Path(src, childName), childInDest = new Path(dest, childName);
-
-      if (child.isFile()) {
-        if (fs.exists(childInDest)) {
-          log.warn("File already exists in archive, ignoring. " + childInDest);
-        } else {
-          fs.rename(childInSrc, childInDest);
-        }
-      } else if (child.isDirectory()) {
-        if (fs.exists(childInDest)) {
-          // Recurse
-          merge(fs, childInSrc, childInDest);
-        } else {
-          fs.rename(childInSrc, childInDest);
-        }
-      } else {
-        // Symlinks shouldn't exist in table directories..
-        log.warn("Ignoring archiving of non file/directory: " + child);
-      }
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    // nothing to do
-  }
-
-}
 
 public class DeleteTable extends MasterRepo {
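
The removed CleanUp class above counts references to the deleted table's files with a server-side GrepIterator before deciding whether the files can be deleted or archived. Pared down to the scan itself, the pattern is (a sketch of the code removed above, with connection setup elided):

    BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
    bs.setRanges(Arrays.asList(beforeTable, afterTable)); // metadata ranges around this table
    bs.fetchColumnFamily(DataFileColumnFamily.NAME);
    IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
    GrepIterator.setTerm(cfg, "/" + tableId + "/");       // match file paths under this table
    bs.addScanIterator(cfg);
    try {
      for (Entry<Key,Value> entry : bs) {
        // any hit here is another table (e.g. a clone) still referencing a file
      }
    } finally {
      bs.close();
    }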
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java
new file mode 100644
index 0000000..d8f276a
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportInfo.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+
+class ExportInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  public String tableName;
+  public String tableID;
+  public String exportDir;
+  public String namespaceID;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
index e5b7e86..cd50a18 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
@@ -16,268 +16,11 @@
  */
 package org.apache.accumulo.master.tableOps;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedOutputStream;
-import java.io.BufferedWriter;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipOutputStream;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-
-class ExportInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  public String tableName;
-  public String tableID;
-  public String exportDir;
-  public String namespaceID;
-}
-
-class WriteExportFiles extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-  private final ExportInfo tableInfo;
-
-  WriteExportFiles(ExportInfo tableInfo) {
-    this.tableInfo = tableInfo;
-  }
-
-  private void checkOffline(Connector conn) throws Exception {
-    if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
-      Tables.clearCache(conn.getInstance());
-      if (Tables.getTableState(conn.getInstance(), tableInfo.tableID) != TableState.OFFLINE) {
-        throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-            "Table is not offline");
-      }
-    }
-  }
-
-  @Override
-  public long isReady(long tid, Master master) throws Exception {
-
-    long reserved = Utils.reserveNamespace(tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT)
-        + Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
-    if (reserved > 0)
-      return reserved;
-
-    Connector conn = master.getConnector();
-
-    checkOffline(conn);
-
-    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
-
-    // scan for locations
-    metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
-    metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
-
-    if (metaScanner.iterator().hasNext()) {
-      return 500;
-    }
-
-    // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
-    // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
-    metaScanner.clearColumns();
-    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
-
-    if (metaScanner.iterator().hasNext()) {
-      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-          "Write ahead logs found for table");
-    }
-
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    try {
-      exportTable(master.getFileSystem(), master, tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
-    } catch (IOException ioe) {
-      throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
-          "Failed to create export files " + ioe.getMessage());
-    }
-    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-    return null;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
-  }
-
-  public static void exportTable(VolumeManager fs, AccumuloServerContext context, String tableName, String tableID, String exportDir) throws Exception {
-
-    fs.mkdirs(new Path(exportDir));
-    Path exportMetaFilePath = fs.getVolumeByPath(new Path(exportDir)).getFileSystem().makeQualified(new Path(exportDir, Constants.EXPORT_FILE));
-
-    FSDataOutputStream fileOut = fs.create(exportMetaFilePath, false);
-    ZipOutputStream zipOut = new ZipOutputStream(fileOut);
-    BufferedOutputStream bufOut = new BufferedOutputStream(zipOut);
-    DataOutputStream dataOut = new DataOutputStream(bufOut);
-
-    try {
-
-      zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
-      OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
-      osw.append(ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n");
-      osw.append("srcInstanceName:" + context.getInstance().getInstanceName() + "\n");
-      osw.append("srcInstanceID:" + context.getInstance().getInstanceID() + "\n");
-      osw.append("srcZookeepers:" + context.getInstance().getZooKeepers() + "\n");
-      osw.append("srcTableName:" + tableName + "\n");
-      osw.append("srcTableID:" + tableID + "\n");
-      osw.append(ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n");
-      osw.append("srcCodeVersion:" + Constants.VERSION + "\n");
-
-      osw.flush();
-      dataOut.flush();
-
-      exportConfig(context, tableID, zipOut, dataOut);
-      dataOut.flush();
-
-      Map<String,String> uniqueFiles = exportMetadata(fs, context, tableID, zipOut, dataOut);
-
-      dataOut.close();
-      dataOut = null;
-
-      createDistcpFile(fs, exportDir, exportMetaFilePath, uniqueFiles);
-
-    } finally {
-      if (dataOut != null)
-        dataOut.close();
-    }
-  }
-
-  private static void createDistcpFile(VolumeManager fs, String exportDir, Path exportMetaFilePath, Map<String,String> uniqueFiles) throws IOException {
-    BufferedWriter distcpOut = new BufferedWriter(new OutputStreamWriter(fs.create(new Path(exportDir, "distcp.txt"), false), UTF_8));
-
-    try {
-      for (String file : uniqueFiles.values()) {
-        distcpOut.append(file);
-        distcpOut.newLine();
-      }
-
-      distcpOut.append(exportMetaFilePath.toString());
-      distcpOut.newLine();
-
-      distcpOut.close();
-      distcpOut = null;
-
-    } finally {
-      if (distcpOut != null)
-        distcpOut.close();
-    }
-  }
-
-  private static Map<String,String> exportMetadata(VolumeManager fs, AccumuloServerContext context, String tableID, ZipOutputStream zipOut,
-      DataOutputStream dataOut) throws IOException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
-    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
-
-    Map<String,String> uniqueFiles = new HashMap<String,String>();
-
-    Scanner metaScanner = context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
-    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
-    metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
-
-    for (Entry<Key,Value> entry : metaScanner) {
-      entry.getKey().write(dataOut);
-      entry.getValue().write(dataOut);
-
-      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-        String path = fs.getFullPath(entry.getKey()).toString();
-        String tokens[] = path.split("/");
-        if (tokens.length < 1) {
-          throw new RuntimeException("Illegal path " + path);
-        }
-
-        String filename = tokens[tokens.length - 1];
-
-        String existingPath = uniqueFiles.get(filename);
-        if (existingPath == null) {
-          uniqueFiles.put(filename, path);
-        } else if (!existingPath.equals(path)) {
-          // make sure file names are unique, should only apply for tables with file names generated by Accumulo 1.3 and earlier
-          throw new IOException("Cannot export table with nonunique file names " + filename + ". Major compact table.");
-        }
-
-      }
-    }
-    return uniqueFiles;
-  }
-
-  private static void exportConfig(AccumuloServerContext context, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException,
-      AccumuloSecurityException, TableNotFoundException, IOException {
-    Connector conn = context.getConnector();
-
-    DefaultConfiguration defaultConfig = AccumuloConfiguration.getDefaultConfiguration();
-    Map<String,String> siteConfig = conn.instanceOperations().getSiteConfiguration();
-    Map<String,String> systemConfig = conn.instanceOperations().getSystemConfiguration();
-
-    TableConfiguration tableConfig = context.getServerConfigurationFactory().getTableConfiguration(tableID);
-
-    OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
-
-    // only put props that are different than defaults and higher level configurations
-    zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
-    for (Entry<String,String> prop : tableConfig) {
-      if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
-        Property key = Property.getPropertyByKey(prop.getKey());
-
-        if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
-          if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
-            osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
-          }
-        }
-      }
-    }
-
-    osw.flush();
-  }
-}
 
 public class ExportTable extends MasterRepo {
   private static final long serialVersionUID = 1L;
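
WriteExportFiles, removed above and moved to its own file, layers FSDataOutputStream -> ZipOutputStream -> BufferedOutputStream -> DataOutputStream so that a single zip holds both the human-readable export info and the serialized metadata entries; the buffered stream must be flushed before each putNextEntry() so bytes land in the right zip entry. A self-contained sketch of the same layering against a local file (hypothetical path and contents):

    import java.io.BufferedOutputStream;
    import java.io.DataOutputStream;
    import java.io.FileOutputStream;
    import java.io.OutputStreamWriter;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipOutputStream;

    public class ZipLayeringSketch {
      public static void main(String[] args) throws Exception {
        FileOutputStream fileOut = new FileOutputStream("/tmp/export.zip");
        ZipOutputStream zipOut = new ZipOutputStream(fileOut);
        DataOutputStream dataOut = new DataOutputStream(new BufferedOutputStream(zipOut));

        zipOut.putNextEntry(new ZipEntry("exportinfo"));
        OutputStreamWriter osw = new OutputStreamWriter(dataOut, StandardCharsets.UTF_8);
        osw.append("exportVersion:1\n");
        osw.flush();
        dataOut.flush(); // push buffered bytes into the open entry before switching entries

        zipOut.putNextEntry(new ZipEntry("metadata.bin"));
        dataOut.writeInt(42); // stand-in for entry.getKey().write(dataOut)

        dataOut.close(); // closing the top of the chain finalizes the zip
      }
    }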

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
new file mode 100644
index 0000000..a502a3d
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+
+class FinishCancelCompaction extends MasterRepo {
+  private static final long serialVersionUID = 1L;
+  private String tableId;
+
+  public FinishCancelCompaction(String tableId) {
+    this.tableId = tableId;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    Utils.getReadLock(tableId, tid).unlock();
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
new file mode 100644
index 0000000..7c3701b
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.slf4j.LoggerFactory;
+
+class FinishCloneTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+  private CloneInfo cloneInfo;
+
+  public FinishCloneTable(CloneInfo cloneInfo) {
+    this.cloneInfo = cloneInfo;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+    // Directories are intentionally not created: they should be unique because they occupy a
+    // different namespace than normal tablet directories, and some clones may never create
+    // files, so there is no need to consume namenode space with directories that are not
+    // used. Tablets will create directories as needed.
+
+    TableManager.getInstance().transitionTableState(cloneInfo.tableId, TableState.ONLINE);
+
+    Utils.unreserveNamespace(cloneInfo.srcNamespaceId, tid, false);
+    if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
+      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
+    Utils.unreserveTable(cloneInfo.srcTableId, tid, false);
+    Utils.unreserveTable(cloneInfo.tableId, tid, true);
+
+    environment.getEventCoordinator().event("Cloned table %s from %s", cloneInfo.tableName, cloneInfo.srcTableId);
+
+    LoggerFactory.getLogger(FinishCloneTable.class).debug("Cloned table " + cloneInfo.srcTableId + " " + cloneInfo.tableId + " " + cloneInfo.tableName);
+
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {}
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
new file mode 100644
index 0000000..93cc194
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.slf4j.LoggerFactory;
+
+class FinishCreateNamespace extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private NamespaceInfo namespaceInfo;
+
+  public FinishCreateNamespace(NamespaceInfo ti) {
+    this.namespaceInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long id, Master env) throws Exception {
+
+    Utils.unreserveNamespace(namespaceInfo.namespaceId, id, true);
+
+    env.getEventCoordinator().event("Created namespace %s ", namespaceInfo.namespaceName);
+
+    LoggerFactory.getLogger(FinishCreateNamespace.class).debug("Created namespace " + namespaceInfo.namespaceId + " " + namespaceInfo.namespaceName);
+
+    return null;
+  }
+
+  @Override
+  public String getReturn() {
+    return namespaceInfo.namespaceId;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {}
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
new file mode 100644
index 0000000..2343efb
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.slf4j.LoggerFactory;
+
+class FinishCreateTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private TableInfo tableInfo;
+
+  public FinishCreateTable(TableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
+
+    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+
+    env.getEventCoordinator().event("Created table %s ", tableInfo.tableName);
+
+    LoggerFactory.getLogger(FinishCreateTable.class).debug("Created table " + tableInfo.tableId + " " + tableInfo.tableName);
+
+    return null;
+  }
+
+  @Override
+  public String getReturn() {
+    return tableInfo.tableId;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {}
+
+}
\ No newline at end of file

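Note the asymmetry in the unreserve calls above: the namespace is released with false (held non-exclusively, so other table operations in the same namespace can run concurrently) while the table is released with true (held exclusively). Assuming the boolean marks an exclusive reservation, the pairing behaves like a read-write lock discipline; a small sketch of that pattern:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class ReservationSketch {
      public static void main(String[] args) {
        ReentrantReadWriteLock namespace = new ReentrantReadWriteLock();
        ReentrantReadWriteLock table = new ReentrantReadWriteLock();

        // start of the create-table chain: shared hold on the namespace,
        // exclusive hold on the new table id
        namespace.readLock().lock();
        table.writeLock().lock();

        // a second create in the same namespace is not blocked, because the
        // namespace is only read-locked here
        System.out.println("shared namespace holds: " + namespace.getReadLockCount());

        // FinishCreateTable releases both, matching the mode each was taken in
        namespace.readLock().unlock();   // unreserveNamespace(..., tid, false)
        table.writeLock().unlock();      // unreserveTable(..., tid, true)
      }
    }
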
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
new file mode 100644
index 0000000..7dd76b1
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.LoggerFactory;
+
+class FinishImportTable extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  public FinishImportTable(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+
+    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));
+
+    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
+
+    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+
+    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+
+    env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
+
+    LoggerFactory.getLogger(FinishImportTable.class).debug("Imported table " + tableInfo.tableId + " " + tableInfo.tableName);
+
+    return null;
+  }
+
+  @Override
+  public String getReturn() {
+    return tableInfo.tableId;
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {}
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
new file mode 100644
index 0000000..f436fd3
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.client.impl.TableOperationsImpl;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.util.TablePropUtil;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+class ImportPopulateZookeeper extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  ImportPopulateZookeeper(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.IMPORT);
+  }
+
+  private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
+
+    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
+
+    try {
+      FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
+      return TableOperationsImpl.getExportedProps(ns, path);
+    } catch (IOException ioe) {
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error reading table props from " + path + " " + ioe.getMessage());
+    }
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // reserve the table name in zookeeper or fail
+
+    Utils.tableNameLock.lock();
+    try {
+      // write tableName & tableId to zookeeper
+      Instance instance = env.getInstance();
+
+      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
+
+      String namespace = Tables.qualify(tableInfo.tableName).getFirst();
+      String namespaceId = Namespaces.getNamespaceId(instance, namespace);
+      TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
+
+      Tables.clearCache(instance);
+    } finally {
+      Utils.tableNameLock.unlock();
+    }
+
+    for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet())
+      if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
+        throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+            "Invalid table property " + entry.getKey());
+      }
+
+    return new CreateImportDir(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    Instance instance = env.getInstance();
+    TableManager.getInstance().removeTable(tableInfo.tableId);
+    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    Tables.clearCache(instance);
+  }
+}
\ No newline at end of file

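The tableNameLock section above is a classic check-then-act guard: checkTableDoesNotExist and addTable must execute atomically, otherwise two concurrent imports could both pass the existence check and claim the same table name. A self-contained sketch of the same discipline against an in-memory registry (hypothetical names, standing in for the zookeeper-backed TableManager):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.locks.ReentrantLock;

    public class NameRegistrySketch {
      static final ReentrantLock tableNameLock = new ReentrantLock();
      static final Map<String,String> nameToId = new ConcurrentHashMap<>();

      static void register(String tableName, String tableId) {
        tableNameLock.lock();
        try {
          // check and insert under one lock: without it, two threads could
          // both observe "absent" and both register the same name
          if (nameToId.containsKey(tableName))
            throw new IllegalStateException("table already exists: " + tableName);
          nameToId.put(tableName, tableId);
        } finally {
          tableNameLock.unlock();
        }
      }

      public static void main(String[] args) {
        register("myns.imported", "42");
        System.out.println(nameToId);   // {myns.imported=42}
      }
    }
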
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
new file mode 100644
index 0000000..00fade9
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportSetupPermissions.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.security.SecurityOperation;
+import org.slf4j.LoggerFactory;
+
+class ImportSetupPermissions extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  public ImportSetupPermissions(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public long isReady(long tid, Master environment) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master env) throws Exception {
+    // give all table permissions to the creator
+    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
+    for (TablePermission permission : TablePermission.values()) {
+      try {
+        security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
+      } catch (ThriftSecurityException e) {
+        LoggerFactory.getLogger(ImportSetupPermissions.class).error("{}", e.getMessage(), e);
+        throw e;
+      }
+    }
+
+    // setup permissions in zookeeper before table info in zookeeper
+    // this way concurrent users will not get a spurious permission denied
+    // error
+    return new ImportPopulateZookeeper(tableInfo);
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
+  }
+}
\ No newline at end of file

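The ordering comment at the end of call() is the key design point here: permissions are written before the table becomes visible, so a concurrent client can never observe the table while the creator's grants are missing. Iterating TablePermission.values() also means any permission constant added later is granted automatically. A toy sketch of that loop, with an abridged stand-in enum:

    import java.util.EnumSet;

    public class GrantAllSketch {
      // abridged, illustrative stand-in for o.a.a.core.security.TablePermission
      enum TablePermission { READ, WRITE, BULK_IMPORT, ALTER_TABLE, GRANT, DROP_TABLE }

      public static void main(String[] args) {
        EnumSet<TablePermission> granted = EnumSet.noneOf(TablePermission.class);
        // values() picks up new enum constants without further code changes
        for (TablePermission permission : TablePermission.values())
          granted.add(permission);
        System.out.println(granted);
      }
    }
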
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index 31bc52c..a90474f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@ -18,542 +18,21 @@ package org.apache.accumulo.master.tableOps;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
-import java.io.BufferedInputStream;
 import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Namespaces;
-import org.apache.accumulo.core.client.impl.TableOperationsImpl;
-import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.fate.Repo;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.security.SecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.tablets.UniqueNameAllocator;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.util.TablePropUtil;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-
-/**
- *
- */
-class ImportedTableInfo implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-
-  public String exportDir;
-  public String user;
-  public String tableName;
-  public String tableId;
-  public String importDir;
-  public String namespaceId;
-}
-
-class FinishImportTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  public FinishImportTable(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-
-    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));
-
-    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
-
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-
-    env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
-
-    LoggerFactory.getLogger(FinishImportTable.class).debug("Imported table " + tableInfo.tableId + " " + tableInfo.tableName);
-
-    return null;
-  }
-
-  @Override
-  public String getReturn() {
-    return tableInfo.tableId;
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {}
-
-}
-
-class MoveExportedFiles extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  MoveExportedFiles(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-    try {
-      VolumeManager fs = master.getFileSystem();
-
-      Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
-
-      for (String oldFileName : fileNameMappings.keySet()) {
-        if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
-          throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-              "File referenced by exported table does not exists " + oldFileName);
-        }
-      }
-
-      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
-
-      for (FileStatus fileStatus : files) {
-        String newName = fileNameMappings.get(fileStatus.getPath().getName());
-
-        if (newName != null)
-          fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
-      }
-
-      return new FinishImportTable(tableInfo);
-    } catch (IOException ioe) {
-      log.warn("{}", ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error renaming files " + ioe.getMessage());
-    }
-  }
-}
-
-class PopulateMetadataTable extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  PopulateMetadataTable(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  static Map<String,String> readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo) throws Exception {
-    BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(tableInfo.importDir, "mappings.txt")), UTF_8));
-
-    try {
-      Map<String,String> map = new HashMap<String,String>();
-
-      String line = null;
-      while ((line = in.readLine()) != null) {
-        String sa[] = line.split(":", 2);
-        map.put(sa[0], sa[1]);
-      }
-
-      return map;
-    } finally {
-      in.close();
-    }
-
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
-
-    BatchWriter mbw = null;
-    ZipInputStream zis = null;
-
-    try {
-      VolumeManager fs = master.getFileSystem();
-
-      mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
-      zis = new ZipInputStream(fs.open(path));
-
-      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
-
-      log.info("importDir is " + tableInfo.importDir);
-
-      // This is a directory already prefixed with proper volume information e.g. hdfs://localhost:8020/path/to/accumulo/tables/...
-      final String bulkDir = tableInfo.importDir;
-
-      final String[] tableDirs = ServerConstants.getTablesDirs();
-
-      ZipEntry zipEntry;
-      while ((zipEntry = zis.getNextEntry()) != null) {
-        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
-          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
-
-          Key key = new Key();
-          Value val = new Value();
-
-          Mutation m = null;
-          Text currentRow = null;
-          int dirCount = 0;
-
-          while (true) {
-            key.readFields(in);
-            val.readFields(in);
-
-            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
-            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
-
-            Text cq;
-
-            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
-              String oldName = new Path(key.getColumnQualifier().toString()).getName();
-              String newName = fileNameMappings.get(oldName);
-
-              if (newName == null) {
-                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-                    "File " + oldName + " does not exist in import dir");
-              }
-
-              cq = new Text(bulkDir + "/" + newName);
-            } else {
-              cq = key.getColumnQualifier();
-            }
-
-            if (m == null) {
-              // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
-              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
-
-              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
-              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
-
-              m = new Mutation(metadataRow);
-              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
-              currentRow = metadataRow;
-            }
-
-            if (!currentRow.equals(metadataRow)) {
-              mbw.addMutation(m);
-
-              // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
-              String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
-
-              // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
-              String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
-
-              m = new Mutation(metadataRow);
-              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
-            }
-
-            m.put(key.getColumnFamily(), cq, val);
-
-            if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
-              mbw.addMutation(m);
-              break; // its the last column in the last row
-            }
-          }
-
-          break;
-        }
-      }
-
-      return new MoveExportedFiles(tableInfo);
-    } catch (IOException ioe) {
-      log.warn("{}", ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error reading " + path + " " + ioe.getMessage());
-    } finally {
-      if (zis != null) {
-        try {
-          zis.close();
-        } catch (IOException ioe) {
-          log.warn("Failed to close zip file ", ioe);
-        }
-      }
-
-      if (mbw != null) {
-        mbw.close();
-      }
-    }
-  }
-
-  /**
-   * Given options for tables (across multiple volumes), construct an absolute path using the unique name within the chosen volume
-   *
-   * @return An absolute, unique path for the imported table
-   */
-  protected String getClonedTabletDir(Master master, String[] tableDirs, String tabletDir) {
-    // We can try to spread out the tablet dirs across all volumes
-    String tableDir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), tableDirs);
-
-    // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
-    return tableDir + "/" + tableInfo.tableId + "/" + tabletDir;
-  }
-
-  @Override
-  public void undo(long tid, Master environment) throws Exception {
-    MetadataTableUtil.deleteTable(tableInfo.tableId, false, environment, environment.getMasterLock());
-  }
-}
-
-class MapImportFileNames extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  MapImportFileNames(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master environment) throws Exception {
-
-    Path path = new Path(tableInfo.importDir, "mappings.txt");
-
-    BufferedWriter mappingsWriter = null;
-
-    try {
-      VolumeManager fs = environment.getFileSystem();
-
-      fs.mkdirs(new Path(tableInfo.importDir));
-
-      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
-
-      UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
-
-      mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
-
-      for (FileStatus fileStatus : files) {
-        String fileName = fileStatus.getPath().getName();
-        log.info("filename " + fileStatus.getPath().toString());
-        String sa[] = fileName.split("\\.");
-        String extension = "";
-        if (sa.length > 1) {
-          extension = sa[sa.length - 1];
-
-          if (!FileOperations.getValidExtensions().contains(extension)) {
-            continue;
-          }
-        } else {
-          // assume it is a map file
-          extension = Constants.MAPFILE_EXTENSION;
-        }
-
-        String newName = "I" + namer.getNextName() + "." + extension;
-
-        mappingsWriter.append(fileName);
-        mappingsWriter.append(':');
-        mappingsWriter.append(newName);
-        mappingsWriter.newLine();
-      }
-
-      mappingsWriter.close();
-      mappingsWriter = null;
-
-      return new PopulateMetadataTable(tableInfo);
-    } catch (IOException ioe) {
-      log.warn("{}", ioe.getMessage(), ioe);
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error writing mapping file " + path + " " + ioe.getMessage());
-    } finally {
-      if (mappingsWriter != null)
-        try {
-          mappingsWriter.close();
-        } catch (IOException ioe) {
-          log.warn("Failed to close " + path, ioe);
-        }
-    }
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir));
-  }
-}
-
-class CreateImportDir extends MasterRepo {
-  private static final Logger log = LoggerFactory.getLogger(CreateImportDir.class);
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  CreateImportDir(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master master) throws Exception {
-
-    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
-
-    Path exportDir = new Path(tableInfo.exportDir);
-    String[] tableDirs = ServerConstants.getTablesDirs();
-
-    log.info("Looking for matching filesystem for " + exportDir + " from options " + Arrays.toString(tableDirs));
-    Path base = master.getFileSystem().matchingFileSystem(exportDir, tableDirs);
-    log.info("Chose base table directory of " + base);
-    Path directory = new Path(base, tableInfo.tableId);
-
-    Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
-
-    tableInfo.importDir = newBulkDir.toString();
-
-    log.info("Using import dir: " + tableInfo.importDir);
-
-    return new MapImportFileNames(tableInfo);
-  }
-}
-
-class ImportPopulateZookeeper extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  ImportPopulateZookeeper(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.IMPORT);
-  }
-
-  private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
-
-    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
-
-    try {
-      FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
-      return TableOperationsImpl.getExportedProps(ns, path);
-    } catch (IOException ioe) {
-      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-          "Error reading table props from " + path + " " + ioe.getMessage());
-    }
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // reserve the table name in zookeeper or fail
-
-    Utils.tableNameLock.lock();
-    try {
-      // write tableName & tableId to zookeeper
-      Instance instance = env.getInstance();
-
-      Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
-
-      String namespace = Tables.qualify(tableInfo.tableName).getFirst();
-      String namespaceId = Namespaces.getNamespaceId(instance, namespace);
-      TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
-
-      Tables.clearCache(instance);
-    } finally {
-      Utils.tableNameLock.unlock();
-    }
-
-    for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet())
-      if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
-        throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
-            "Invalid table property " + entry.getKey());
-      }
-
-    return new CreateImportDir(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    Instance instance = env.getInstance();
-    TableManager.getInstance().removeTable(tableInfo.tableId);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
-    Tables.clearCache(instance);
-  }
-}
-
-class ImportSetupPermissions extends MasterRepo {
-
-  private static final long serialVersionUID = 1L;
-
-  private ImportedTableInfo tableInfo;
-
-  public ImportSetupPermissions(ImportedTableInfo ti) {
-    this.tableInfo = ti;
-  }
-
-  @Override
-  public long isReady(long tid, Master environment) throws Exception {
-    return 0;
-  }
-
-  @Override
-  public Repo<Master> call(long tid, Master env) throws Exception {
-    // give all table permissions to the creator
-    SecurityOperation security = AuditedSecurityOperation.getInstance(env);
-    for (TablePermission permission : TablePermission.values()) {
-      try {
-        security.grantTablePermission(env.rpcCreds(), tableInfo.user, tableInfo.tableId, permission, tableInfo.namespaceId);
-      } catch (ThriftSecurityException e) {
-        LoggerFactory.getLogger(ImportSetupPermissions.class).error("{}", e.getMessage(), e);
-        throw e;
-      }
-    }
-
-    // setup permissions in zookeeper before table info in zookeeper
-    // this way concurrent users will not get a spurious permission denied
-    // error
-    return new ImportPopulateZookeeper(tableInfo);
-  }
-
-  @Override
-  public void undo(long tid, Master env) throws Exception {
-    AuditedSecurityOperation.getInstance(env).deleteTable(env.rpcCreds(), tableInfo.tableId, tableInfo.namespaceId);
-  }
-}
 
 public class ImportTable extends MasterRepo {
   private static final long serialVersionUID = 1L;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java
new file mode 100644
index 0000000..34bb6c8
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportedTableInfo.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.Serializable;
+
+class ImportedTableInfo implements Serializable {
+
+  private static final long serialVersionUID = 1L;
+
+  public String exportDir;
+  public String user;
+  public String tableName;
+  public String tableId;
+  public String importDir;
+  public String namespaceId;
+}
\ No newline at end of file

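ImportedTableInfo is a plain Serializable holder: fate persists each Repo, along with state objects like this one, so an operation can resume after a master restart, which is presumably why the explicit serialVersionUID matters; it keeps previously persisted blobs readable if the class later evolves compatibly. A minimal round-trip sketch of such a DTO:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;

    public class DtoRoundTripSketch {
      static class Info implements Serializable {
        private static final long serialVersionUID = 1L;  // pin the wire-format id
        public String tableName;
        public String tableId;
      }

      public static void main(String[] args) throws Exception {
        Info original = new Info();
        original.tableName = "myns.imported";
        original.tableId = "42";

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
          out.writeObject(original);                      // what a fate store would persist
        }

        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
          Info restored = (Info) in.readObject();
          System.out.println(restored.tableName + " / " + restored.tableId);
        }
      }
    }
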
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java
new file mode 100644
index 0000000..c478a5d
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/LoadFiles.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedWriter;
+import java.io.OutputStreamWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.accumulo.core.client.impl.ServerClient;
+import org.apache.accumulo.core.client.impl.thrift.ClientService;
+import org.apache.accumulo.core.client.impl.thrift.ClientService.Client;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.SimpleThreadPool;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.htrace.wrappers.TraceExecutorService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class LoadFiles extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private static ExecutorService threadPool = null;
+  private static final Logger log = LoggerFactory.getLogger(BulkImport.class);
+
+  private String tableId;
+  private String source;
+  private String bulk;
+  private String errorDir;
+  private boolean setTime;
+
+  public LoadFiles(String tableId, String source, String bulk, String errorDir, boolean setTime) {
+    this.tableId = tableId;
+    this.source = source;
+    this.bulk = bulk;
+    this.errorDir = errorDir;
+    this.setTime = setTime;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    if (master.onlineTabletServers().size() == 0)
+      return 500;
+    return 0;
+  }
+
+  private static synchronized ExecutorService getThreadPool(Master master) {
+    if (threadPool == null) {
+      int threadPoolSize = master.getConfiguration().getCount(Property.MASTER_BULK_THREADPOOL_SIZE);
+      ThreadPoolExecutor pool = new SimpleThreadPool(threadPoolSize, "bulk import");
+      pool.allowCoreThreadTimeOut(true);
+      threadPool = new TraceExecutorService(pool);
+    }
+    return threadPool;
+  }
+
+  @Override
+  public Repo<Master> call(final long tid, final Master master) throws Exception {
+    ExecutorService executor = getThreadPool(master);
+    final AccumuloConfiguration conf = master.getConfiguration();
+    VolumeManager fs = master.getFileSystem();
+    List<FileStatus> files = new ArrayList<FileStatus>();
+    for (FileStatus entry : fs.listStatus(new Path(bulk))) {
+      files.add(entry);
+    }
+    log.debug("tid " + tid + " importing " + files.size() + " files");
+
+    Path writable = new Path(this.errorDir, ".iswritable");
+    if (!fs.createNewFile(writable)) {
+      // Maybe this is a re-try... clear the flag and try again
+      fs.delete(writable);
+      if (!fs.createNewFile(writable))
+        throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
+            "Unable to write to " + this.errorDir);
+    }
+    fs.delete(writable);
+
+    final Set<String> filesToLoad = Collections.synchronizedSet(new HashSet<String>());
+    for (FileStatus f : files)
+      filesToLoad.add(f.getPath().toString());
+
+    final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
+    for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
+      List<Future<List<String>>> results = new ArrayList<Future<List<String>>>();
+
+      if (master.onlineTabletServers().size() == 0)
+        log.warn("There are no tablet servers to process bulk import, waiting (tid = " + tid + ")");
+
+      while (master.onlineTabletServers().size() == 0) {
+        UtilWaitThread.sleep(500);
+      }
+
+      // Use the threadpool to assign files one-at-a-time to the server
+      final List<String> loaded = Collections.synchronizedList(new ArrayList<String>());
+      for (final String file : filesToLoad) {
+        results.add(executor.submit(new Callable<List<String>>() {
+          @Override
+          public List<String> call() {
+            List<String> failures = new ArrayList<String>();
+            ClientService.Client client = null;
+            String server = null;
+            try {
+              // get a connection to a random tablet server, do not prefer cached connections because
+              // this is running on the master and there are lots of connections to tablet servers
+              // serving the metadata tablets
+              long timeInMillis = master.getConfiguration().getTimeInMillis(Property.MASTER_BULK_TIMEOUT);
+              Pair<String,Client> pair = ServerClient.getConnection(master, false, timeInMillis);
+              client = pair.getSecond();
+              server = pair.getFirst();
+              List<String> attempt = Collections.singletonList(file);
+              log.debug("Asking " + pair.getFirst() + " to bulk import " + file);
+              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId, attempt, errorDir, setTime);
+              if (fail.isEmpty()) {
+                loaded.add(file);
+              } else {
+                failures.addAll(fail);
+              }
+            } catch (Exception ex) {
+              log.error("rpc failed server:" + server + ", tid:" + tid, ex);
+            } finally {
+              ServerClient.close(client);
+            }
+            return failures;
+          }
+        }));
+      }
+      Set<String> failures = new HashSet<String>();
+      for (Future<List<String>> f : results)
+        failures.addAll(f.get());
+      filesToLoad.removeAll(loaded);
+      if (filesToLoad.size() > 0) {
+        log.debug("tid " + tid + " attempt " + (attempt + 1) + " " + sampleList(filesToLoad, 10) + " failed");
+        UtilWaitThread.sleep(100);
+      }
+    }
+
+    FSDataOutputStream failFile = fs.create(new Path(errorDir, BulkImport.FAILURES_TXT), true);
+    BufferedWriter out = new BufferedWriter(new OutputStreamWriter(failFile, UTF_8));
+    try {
+      for (String f : filesToLoad) {
+        out.write(f);
+        out.write("\n");
+      }
+    } finally {
+      out.close();
+    }
+
+    // return the next step, which will perform cleanup
+    return new CompleteBulkImport(tableId, source, bulk, errorDir);
+  }
+
+  static String sampleList(Collection<?> potentiallyLongList, int max) {
+    StringBuilder result = new StringBuilder();
+    result.append("[");
+    int i = 0;
+    for (Object obj : potentiallyLongList) {
+      result.append(obj);
+      if (i >= max) {
+        result.append("...");
+        break;
+      } else {
+        result.append(", ");
+      }
+      i++;
+    }
+    // strip the trailing ", "; this also covers the empty-collection case,
+    // where an unconditional delete would throw StringIndexOutOfBoundsException
+    if (result.length() > 2 && result.charAt(result.length() - 1) == ' ')
+      result.delete(result.length() - 2, result.length());
+    result.append("]");
+    return result.toString();
+  }
+
+}
\ No newline at end of file

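LoadFiles.call() above is a bounded retry loop over a shrinking work set: every outstanding file is farmed out to the thread pool, each task reports back the files that still failed, successes are removed from the set, and the loop repeats up to MASTER_BULK_RETRIES times. A compressed, self-contained sketch of that shape, with the tablet-server RPC replaced by a coin flip:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class RetryLoopSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Set<String> toLoad = Collections.synchronizedSet(
            new HashSet<>(Arrays.asList("f1.rf", "f2.rf", "f3.rf")));
        int retries = 3;

        for (int attempt = 0; attempt < retries && !toLoad.isEmpty(); attempt++) {
          List<Future<String>> results = new ArrayList<>();
          for (String file : toLoad) {
            // stand-in for client.bulkImportFiles(...): null means loaded,
            // otherwise the file is reported back as a failure
            results.add(pool.submit(() -> Math.random() < 0.7 ? null : file));
          }
          Set<String> stillFailing = new HashSet<>();
          for (Future<String> f : results) {
            String failed = f.get();
            if (failed != null)
              stillFailing.add(failed);
          }
          toLoad.retainAll(stillFailing);   // drop everything that loaded
          System.out.println("after attempt " + (attempt + 1) + ": " + toLoad);
        }
        pool.shutdown();
      }
    }
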
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
new file mode 100644
index 0000000..0ee91dd
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.tablets.UniqueNameAllocator;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+class MapImportFileNames extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  MapImportFileNames(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master environment) throws Exception {
+
+    Path path = new Path(tableInfo.importDir, "mappings.txt");
+
+    BufferedWriter mappingsWriter = null;
+
+    try {
+      VolumeManager fs = environment.getFileSystem();
+
+      fs.mkdirs(new Path(tableInfo.importDir));
+
+      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
+
+      UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+
+      mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
+
+      for (FileStatus fileStatus : files) {
+        String fileName = fileStatus.getPath().getName();
+        log.info("filename " + fileStatus.getPath().toString());
+        String sa[] = fileName.split("\\.");
+        String extension = "";
+        if (sa.length > 1) {
+          extension = sa[sa.length - 1];
+
+          if (!FileOperations.getValidExtensions().contains(extension)) {
+            continue;
+          }
+        } else {
+          // assume it is a map file
+          extension = Constants.MAPFILE_EXTENSION;
+        }
+
+        String newName = "I" + namer.getNextName() + "." + extension;
+
+        mappingsWriter.append(fileName);
+        mappingsWriter.append(':');
+        mappingsWriter.append(newName);
+        mappingsWriter.newLine();
+      }
+
+      mappingsWriter.close();
+      mappingsWriter = null;
+
+      return new PopulateMetadataTable(tableInfo);
+    } catch (IOException ioe) {
+      log.warn("{}", ioe.getMessage(), ioe);
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error writing mapping file " + path + " " + ioe.getMessage());
+    } finally {
+      if (mappingsWriter != null)
+        try {
+          mappingsWriter.close();
+        } catch (IOException ioe) {
+          log.warn("Failed to close " + path, ioe);
+        }
+    }
+  }
+
+  @Override
+  public void undo(long tid, Master env) throws Exception {
+    env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir));
+  }
+}
\ No newline at end of file

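The mappings.txt written above is a one-pair-per-line "oldName:newName" file that PopulateMetadataTable and MoveExportedFiles later read back via readMappingFile, which splits each line on the first ':'. A self-contained sketch of the write/read round trip for that format (the real code wraps the streams in UTF-8 readers and writers):

    import java.io.BufferedReader;
    import java.io.BufferedWriter;
    import java.io.File;
    import java.io.FileReader;
    import java.io.FileWriter;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class MappingFileSketch {
      public static void main(String[] args) throws IOException {
        File f = File.createTempFile("mappings", ".txt");
        f.deleteOnExit();

        try (BufferedWriter w = new BufferedWriter(new FileWriter(f))) {
          w.append("F0000abc.rf").append(':').append("I0000001.rf");
          w.newLine();
          w.append("F0000def.rf").append(':').append("I0000002.rf");
          w.newLine();
        }

        Map<String,String> map = new HashMap<>();
        try (BufferedReader r = new BufferedReader(new FileReader(f))) {
          String line;
          while ((line = r.readLine()) != null) {
            String[] sa = line.split(":", 2);   // limit 2 keeps any ':' in the value intact
            map.put(sa[0], sa[1]);
          }
        }
        System.out.println(map);
      }
    }
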
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
new file mode 100644
index 0000000..19395df
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MoveExportedFiles.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+
+class MoveExportedFiles extends MasterRepo {
+
+  private static final long serialVersionUID = 1L;
+
+  private ImportedTableInfo tableInfo;
+
+  MoveExportedFiles(ImportedTableInfo ti) {
+    this.tableInfo = ti;
+  }
+
+  @Override
+  public Repo<Master> call(long tid, Master master) throws Exception {
+    try {
+      VolumeManager fs = master.getFileSystem();
+
+      Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
+
+      for (String oldFileName : fileNameMappings.keySet()) {
+        if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
+          throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+              "File referenced by exported table does not exist " + oldFileName);
+        }
+      }
+
+      FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
+
+      for (FileStatus fileStatus : files) {
+        String newName = fileNameMappings.get(fileStatus.getPath().getName());
+
+        if (newName != null)
+          fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
+      }
+
+      return new FinishImportTable(tableInfo);
+    } catch (IOException ioe) {
+      log.warn("{}", ioe.getMessage(), ioe);
+      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
+          "Error renaming files " + ioe.getMessage());
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6e2e6780/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
new file mode 100644
index 0000000..2444374
--- /dev/null
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.master.tableOps;
+
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.fate.Repo;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.server.tables.TableManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class NamespaceCleanUp extends MasterRepo {
+
+  private static final Logger log = LoggerFactory.getLogger(NamespaceCleanUp.class);
+
+  private static final long serialVersionUID = 1L;
+
+  private String namespaceId;
+
+  public NamespaceCleanUp(String namespaceId) {
+    this.namespaceId = namespaceId;
+  }
+
+  @Override
+  public long isReady(long tid, Master master) throws Exception {
+    return 0;
+  }
+
+  @Override
+  public Repo<Master> call(long id, Master master) throws Exception {
+
+    // remove from zookeeper
+    try {
+      TableManager.getInstance().removeNamespace(namespaceId);
+    } catch (Exception e) {
+      log.error("Failed to find namespace in zookeeper", e);
+    }
+    Tables.clearCache(master.getInstance());
+
+    // remove any permissions associated with this namespace
+    try {
+      AuditedSecurityOperation.getInstance(master).deleteNamespace(master.rpcCreds(), namespaceId);
+    } catch (ThriftSecurityException e) {
+      log.error("{}", e.getMessage(), e);
+    }
+
+    Utils.unreserveNamespace(namespaceId, id, true);
+
+    log.debug("Deleted namespace " + namespaceId);
+
+    return null;
+  }
+
+  @Override
+  public void undo(long tid, Master environment) throws Exception {
+    // nothing to do
+  }
+
+}
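
Each stage of NamespaceCleanUp.call() above catches and logs its own failure rather than aborting, so a missing zookeeper node cannot strand the permission cleanup or the final unreserve. That best-effort staging looks like this in miniature:

    import java.util.Arrays;
    import java.util.List;

    public class CleanupSketch {
      public static void main(String[] args) {
        List<Runnable> stages = Arrays.asList(
            () -> { throw new RuntimeException("znode already gone"); },
            () -> System.out.println("permissions deleted"),
            () -> System.out.println("namespace unreserved"));

        for (Runnable stage : stages) {
          try {
            stage.run();                 // later stages run even if earlier ones fail
          } catch (Exception e) {
            System.err.println("cleanup stage failed, continuing: " + e.getMessage());
          }
        }
      }
    }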