Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/07/17 04:33:16 UTC

[14/50] [abbrv] ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java b/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
new file mode 100644
index 0000000..0b088f5
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.SortedSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.hadoop.io.Text;
+
+/**
+ * A {@link MetadataServicer} that is backed by a table
+ */
+abstract class TableMetadataServicer extends MetadataServicer {
+  
+  private Instance instance;
+  private TCredentials credentials;
+  private String tableIdBeingServiced;
+  private String serviceTableName;
+  
+  public TableMetadataServicer(Instance instance, TCredentials credentials, String serviceTableName, String tableIdBeingServiced) {
+    this.instance = instance;
+    this.credentials = credentials;
+    this.serviceTableName = serviceTableName;
+    this.tableIdBeingServiced = tableIdBeingServiced;
+  }
+  
+  @Override
+  public String getServicedTableId() {
+    return tableIdBeingServiced;
+  }
+  
+  public String getServicingTableName() {
+    return serviceTableName;
+  }
+  
+  @Override
+  public void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    
+    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(getServicingTableName(),
+        Authorizations.EMPTY);
+    
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    
+    // position at first entry in metadata table for given table
+    scanner.setRange(TabletsSection.getRange(getServicedTableId()));
+    
+    Text colf = new Text();
+    Text colq = new Text();
+    
+    KeyExtent currentKeyExtent = null;
+    String location = null;
+    Text row = null;
+    // acquire this table's tablets from the metadata table which services it
+    for (Entry<Key,Value> entry : scanner) {
+      if (row != null) {
+        if (!row.equals(entry.getKey().getRow())) {
+          currentKeyExtent = null;
+          location = null;
+          row = entry.getKey().getRow();
+        }
+      } else {
+        row = entry.getKey().getRow();
+      }
+      
+      colf = entry.getKey().getColumnFamily(colf);
+      colq = entry.getKey().getColumnQualifier(colq);
+      
+      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq)) {
+        currentKeyExtent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
+        tablets.put(currentKeyExtent, location);
+        currentKeyExtent = null;
+        location = null;
+      } else if (colf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
+        location = entry.getValue().toString();
+      }
+      
+    }
+    
+    validateEntries(tablets);
+  }
+  
+  private void validateEntries(SortedMap<KeyExtent,String> tablets) throws AccumuloException {
+    SortedSet<KeyExtent> tabletsKeys = (SortedSet<KeyExtent>) tablets.keySet();
+    // sanity check of metadata table entries
+    // make sure tablets has no holes, and that it starts and ends w/ null
+    if (tabletsKeys.size() == 0)
+      throw new AccumuloException("No entries found in metadata table for table " + getServicedTableId());
+    
+    if (tabletsKeys.first().getPrevEndRow() != null)
+      throw new AccumuloException("Problem with metadata table, first entry for table " + getServicedTableId() + "- " + tabletsKeys.first()
+          + " - has non null prev end row");
+    
+    if (tabletsKeys.last().getEndRow() != null)
+      throw new AccumuloException("Problem with metadata table, last entry for table " + getServicedTableId() + "- " + tabletsKeys.first()
+          + " - has non null end row");
+    
+    Iterator<KeyExtent> tabIter = tabletsKeys.iterator();
+    Text lastEndRow = tabIter.next().getEndRow();
+    while (tabIter.hasNext()) {
+      KeyExtent tabke = tabIter.next();
+      
+      if (tabke.getPrevEndRow() == null)
+        throw new AccumuloException("Problem with metadata table, it has null prev end row in middle of table " + tabke);
+      
+      if (!tabke.getPrevEndRow().equals(lastEndRow))
+        throw new AccumuloException("Problem with metadata table, it has a hole " + tabke.getPrevEndRow() + " != " + lastEndRow);
+      
+      lastEndRow = tabke.getEndRow();
+    }
+    
+    // end METADATA table sanity check
+  }
+  
+}
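
For context, callers obtain one of these servicers through the MetadataServicer factory methods and pass in a map to be populated. A minimal sketch of the intended call pattern, reusing the MockInstance and "A" table setup from MetadataServicerTest further down (the instance name is only illustrative, and whether the mock populates enough metadata for the scan is not shown here):

  MockInstance instance = new MockInstance("example");
  Connector connector = instance.getConnector("root", new PasswordToken(""));
  connector.tableOperations().create("A");
  TCredentials credentials = CredentialHelper.createSquelchError("root", new PasswordToken(""), instance.getInstanceID());
  MetadataServicer servicer = MetadataServicer.forTableName(instance, credentials, "A");
  SortedMap<KeyExtent,String> tablets = new TreeMap<KeyExtent,String>();
  servicer.getTabletLocations(tablets);  // each KeyExtent maps to its current location, or null if unassigned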

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
new file mode 100644
index 0000000..d3323a4
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata.schema;
+
+public class DataFileValue {
+  private long size;
+  private long numEntries;
+  private long time = -1;
+  
+  public DataFileValue(long size, long numEntries, long time) {
+    this.size = size;
+    this.numEntries = numEntries;
+    this.time = time;
+  }
+  
+  public DataFileValue(long size, long numEntries) {
+    this.size = size;
+    this.numEntries = numEntries;
+    this.time = -1;
+  }
+  
+  public DataFileValue(byte[] encodedDFV) {
+    String[] ba = new String(encodedDFV).split(",");
+    
+    size = Long.parseLong(ba[0]);
+    numEntries = Long.parseLong(ba[1]);
+    
+    if (ba.length == 3)
+      time = Long.parseLong(ba[2]);
+    else
+      time = -1;
+  }
+  
+  public long getSize() {
+    return size;
+  }
+  
+  public long getNumEntries() {
+    return numEntries;
+  }
+  
+  public boolean isTimeSet() {
+    return time >= 0;
+  }
+  
+  public long getTime() {
+    return time;
+  }
+  
+  public byte[] encode() {
+    if (time >= 0)
+      return ("" + size + "," + numEntries + "," + time).getBytes();
+    return ("" + size + "," + numEntries).getBytes();
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    if (o instanceof DataFileValue) {
+      DataFileValue odfv = (DataFileValue) o;
+      
+      return size == odfv.size && numEntries == odfv.numEntries;
+    }
+    
+    return false;
+  }
+  
+  @Override
+  public int hashCode() {
+    return Long.valueOf(size + numEntries).hashCode();
+  }
+  
+  @Override
+  public String toString() {
+    return size + " " + numEntries;
+  }
+  
+  public void setTime(long time) {
+    if (time < 0)
+      throw new IllegalArgumentException();
+    this.time = time;
+  }
+}
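
The encoded form is simply "size,numEntries" with an optional trailing ",time"; a quick round trip:

  DataFileValue dfv = new DataFileValue(1024, 100);    // 1024 bytes, 100 entries, time unset
  byte[] encoded = dfv.encode();                       // yields "1024,100"
  DataFileValue decoded = new DataFileValue(encoded);  // decoded.getSize() == 1024, decoded.isTimeSet() == false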

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
new file mode 100644
index 0000000..4c2b6f8
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata.schema;
+
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.hadoop.io.Text;
+
+/**
+ * Describes the table schema used for metadata tables
+ */
+public class MetadataSchema {
+  
+  private static final String RESERVED_PREFIX = "~";
+  
+  private static class Section {
+    private String rowPrefix;
+    private Range range;
+    
+    private Section(String startRow, boolean startInclusive, String endRow, boolean endInclusive) {
+      rowPrefix = startRow;
+      range = new Range(startRow, startInclusive, endRow, endInclusive);
+    }
+  }
+  
+  /**
+   * Used for storing information about tablets
+   */
+  public static class TabletsSection {
+    private static final Section section = new Section(null, false, RESERVED_PREFIX, false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static Range getRange(String tableId) {
+      return new Range(new Key(tableId + ';'), true, new Key(tableId + '<').followingKey(PartialKey.ROW), false);
+    }
+    
+    public static Text getRow(Text tableId, Text endRow) {
+      Text entry = new Text(tableId);
+      
+      if (endRow == null) {
+        // append delimiter for default tablet
+        entry.append(new byte[] {'<'}, 0, 1);
+      } else {
+        // append delimiter for regular tablets
+        entry.append(new byte[] {';'}, 0, 1);
+        entry.append(endRow.getBytes(), 0, endRow.getLength());
+      }
+      
+      return entry;
+    }
+    
+    /**
+     * Column family for storing the tablet information needed by clients
+     */
+    public static class TabletColumnFamily {
+      /**
+       * This column family must sort after all other column families for a tablet, because {@link #PREV_ROW_COLUMN} lives in it and must sort last;
+       * the SimpleGarbageCollector relies on that ordering.
+       */
+      public static final Text NAME = new Text("~tab");
+      /**
+       * README: it is very important that prevRow sorts last, to avoid race conditions between the garbage collector and split; it must sort after everything
+       * else for that tablet.
+       */
+      public static final ColumnFQ PREV_ROW_COLUMN = new ColumnFQ(NAME, new Text("~pr"));
+      /**
+       * A temporary field in case a split fails and we need to roll back
+       */
+      public static final ColumnFQ OLD_PREV_ROW_COLUMN = new ColumnFQ(NAME, new Text("oldprevrow"));
+      /**
+       * A temporary field for splits to optimize certain operations
+       */
+      public static final ColumnFQ SPLIT_RATIO_COLUMN = new ColumnFQ(NAME, new Text("splitRatio"));
+    }
+    
+    /**
+     * Column family for recording information used by the TServer
+     */
+    public static class ServerColumnFamily {
+      public static final Text NAME = new Text("srv");
+      /**
+       * Holds the location of the tablet in the DFS file system
+       */
+      public static final ColumnFQ DIRECTORY_COLUMN = new ColumnFQ(NAME, new Text("dir"));
+      /**
+       * Holds the {@link TimeType}
+       */
+      public static final ColumnFQ TIME_COLUMN = new ColumnFQ(NAME, new Text("time"));
+      /**
+       * Holds flush IDs to enable waiting on a flush to complete
+       */
+      public static final ColumnFQ FLUSH_COLUMN = new ColumnFQ(NAME, new Text("flush"));
+      /**
+       * Holds compact IDs to enable waiting on a compaction to complete
+       */
+      public static final ColumnFQ COMPACT_COLUMN = new ColumnFQ(NAME, new Text("compact"));
+      /**
+       * Holds lock IDs to enable a sanity check to ensure that the TServer writing to the metadata tablet is not dead
+       */
+      public static final ColumnFQ LOCK_COLUMN = new ColumnFQ(NAME, new Text("lock"));
+    }
+    
+    /**
+     * Column family for storing entries created by the TServer to indicate it has loaded a tablet that it was assigned
+     */
+    public static class CurrentLocationColumnFamily {
+      public static final Text NAME = new Text("loc");
+    }
+    
+    /**
+     * Column family for storing the assigned location
+     */
+    public static class FutureLocationColumnFamily {
+      public static final Text NAME = new Text("future");
+    }
+    
+    /**
+     * Column family for storing last location, as a hint for assignment
+     */
+    public static class LastLocationColumnFamily {
+      public static final Text NAME = new Text("last");
+    }
+    
+    /**
+     * Temporary markers that indicate a tablet loaded a bulk file
+     */
+    public static class BulkFileColumnFamily {
+      public static final Text NAME = new Text("loaded");
+    }
+    
+    /**
+     * Temporary marker that indicates a tablet was successfully cloned
+     */
+    public static class ClonedColumnFamily {
+      public static final Text NAME = new Text("!cloned");
+    }
+    
+    /**
+     * Column family for storing files used by a tablet
+     */
+    public static class DataFileColumnFamily {
+      public static final Text NAME = new Text("file");
+    }
+    
+    /**
+     * Column family for storing the set of files scanned with an isolated scanner, to prevent them from being deleted
+     */
+    public static class ScanFileColumnFamily {
+      public static final Text NAME = new Text("scan");
+    }
+    
+    /**
+     * Column family for storing write-ahead log entries
+     */
+    public static class LogColumnFamily {
+      public static final Text NAME = new Text("log");
+    }
+    
+    /**
+     * Column family for indicating that the files in a tablet have been trimmed to only include data for the current tablet, so that they are safe to merge
+     */
+    public static class ChoppedColumnFamily {
+      public static final Text NAME = new Text("chopped");
+      public static final ColumnFQ CHOPPED_COLUMN = new ColumnFQ(NAME, new Text("chopped"));
+    }
+  }
+  
+  /**
+   * Contains additional metadata in a reserved area not for tablets
+   */
+  public static class ReservedSection {
+    private static final Section section = new Section(RESERVED_PREFIX, true, null, false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+  /**
+   * Holds delete markers for potentially unused files/directories
+   */
+  public static class DeletesSection {
+    private static final Section section = new Section(RESERVED_PREFIX + "del", true, RESERVED_PREFIX + "dem", false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+  /**
+   * Holds bulk-load-in-progress processing flags
+   */
+  public static class BlipSection {
+    private static final Section section = new Section(RESERVED_PREFIX + "blip", true, RESERVED_PREFIX + "bliq", false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+}
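
As a concrete illustration of the tablet-section encoding above, for a hypothetical table id "2":

  Text defaultTablet = TabletsSection.getRow(new Text("2"), null);          // "2<"  (default/last tablet)
  Text boundedTablet = TabletsSection.getRow(new Text("2"), new Text("m")); // "2;m" (tablet whose end row is "m")
  Range allTablets = TabletsSection.getRange("2");                          // spans every "2;..." row plus "2<"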

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java b/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
index 7cf1c6f..8826bb1 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
@@ -69,30 +69,6 @@ public class ColumnFQ implements Comparable<ColumnFQ> {
     m.putDelete(colf, colq);
   }
   
-  /**
-   * @deprecated since 1.5, use {@link #fetch(ScannerBase)} instead
-   */
-  @Deprecated
-  public static void fetch(ScannerBase sb, ColumnFQ cfq) {
-    sb.fetchColumn(cfq.colf, cfq.colq);
-  }
-  
-  /**
-   * @deprecated since 1.5, use {@link #put(Mutation, Value)} instead
-   */
-  @Deprecated
-  public static void put(Mutation m, ColumnFQ cfq, Value v) {
-    m.put(cfq.colf, cfq.colq, v);
-  }
-  
-  /**
-   * @deprecated since 1.5, use {@link #putDelete(Mutation)} instead
-   */
-  @Deprecated
-  public static void putDelete(Mutation m, ColumnFQ cfq) {
-    m.putDelete(cfq.colf, cfq.colq);
-  }
-
   @Override
   public boolean equals(Object o) {
     if (!(o instanceof ColumnFQ))
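
The removed static helpers already had instance-method equivalents, which the rest of this patch now calls directly. A sketch of the replacements, assuming an existing Scanner, Mutation, and Value, and using the DIRECTORY_COLUMN constant from the new MetadataSchema:

  ColumnFQ dir = TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
  dir.fetch(scanner);          // was ColumnFQ.fetch(scanner, dir)
  dir.put(mutation, value);    // was ColumnFQ.put(mutation, dir, value)
  dir.putDelete(mutation);     // was ColumnFQ.putDelete(mutation, dir)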

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/Merge.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Merge.java b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
index bad43bb..b1d0205 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Merge.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
@@ -31,6 +31,9 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.io.Text;
@@ -216,8 +219,8 @@ public class Merge {
       throw new MergeException(e);
     }
     scanner.setRange(new KeyExtent(new Text(tableId), end, start).toMetadataRange());
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
     final Iterator<Entry<Key,Value>> iterator = scanner.iterator();
     
     Iterator<Size> result = new Iterator<Size>() {
@@ -233,12 +236,12 @@ public class Merge {
         while (iterator.hasNext()) {
           Entry<Key,Value> entry = iterator.next();
           Key key = entry.getKey();
-          if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
             String[] sizeEntries = new String(entry.getValue().get()).split(",");
             if (sizeEntries.length == 2) {
               tabletSize += Long.parseLong(sizeEntries[0]);
             }
-          } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+          } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
             KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue());
             return new Size(extent, tabletSize);
           }
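
The value in the file column family here is the encoding written by DataFileValue (added above in this patch), so the same size could be read by decoding it rather than splitting on commas. A hedged sketch, not part of this commit (note the existing loop only counts values without a trailing time field):

  DataFileValue dfv = new DataFileValue(entry.getValue().get());
  tabletSize += dfv.getSize();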

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java b/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java
deleted file mode 100644
index 98bc13d..0000000
--- a/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.util;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.hadoop.io.Text;
-
-public class MetadataTable {
-  
-  public static final String ID = "!0";
-  public static final String NAME = "!METADATA";
-  
-  /**
-   * Initial tablet directory name
-   */
-  public static final String TABLE_TABLET_LOCATION = "/table_info";
-  
-  /**
-   * Reserved keyspace is any row that begins with a tilde '~' character
-   */
-  public static final Key RESERVED_RANGE_START_KEY = new Key(new Text(new byte[] {'~'}));
-  public static final Range NON_ROOT_KEYSPACE = new Range(null, false, RESERVED_RANGE_START_KEY, false);
-  public static final Range KEYSPACE = new Range(new Key(new Text(ID)), true, RESERVED_RANGE_START_KEY, false);
-  public static final Range DELETED_RANGE = new Range(new Key(new Text("~del")), true, new Key(new Text("~dem")), false);
-  public static final String BLIP_FLAG_PREFIX = "~blip"; // BLIP = bulk load in progress
-  public static final Range BLIP_KEYSPACE = new Range(new Key(new Text(BLIP_FLAG_PREFIX)), true, new Key(new Text("~bliq")), false);
-  
-  public static final Text CURRENT_LOCATION_COLUMN_FAMILY = new Text("loc");
-  public static final Text FUTURE_LOCATION_COLUMN_FAMILY = new Text("future");
-  public static final Text LAST_LOCATION_COLUMN_FAMILY = new Text("last");
-  /**
-   * Temporary marker that indicates a tablet loaded a bulk file
-   */
-  public static final Text BULKFILE_COLUMN_FAMILY = new Text("loaded");
-  
-  /**
-   * Temporary marker that indicates a tablet was successfully cloned
-   */
-  public static final Text CLONED_COLUMN_FAMILY = new Text("!cloned");
-  
-  /**
-   * This needs to sort after all other column families for that tablet, because the {@link #PREV_ROW_COLUMN} sits in this and that needs to sort last because
-   * the {@link SimpleGarbageCollector} relies on this.
-   */
-  public static final Text TABLET_COLUMN_FAMILY = new Text("~tab");
-  
-  /**
-   * README : very important that prevRow sort last to avoid race conditions between garbage collector and split this needs to sort after everything else for
-   * that tablet
-   */
-  public static final ColumnFQ PREV_ROW_COLUMN = new ColumnFQ(TABLET_COLUMN_FAMILY, new Text("~pr"));
-  public static final ColumnFQ OLD_PREV_ROW_COLUMN = new ColumnFQ(TABLET_COLUMN_FAMILY, new Text("oldprevrow"));
-  public static final ColumnFQ SPLIT_RATIO_COLUMN = new ColumnFQ(TABLET_COLUMN_FAMILY, new Text("splitRatio"));
-  
-  public static final Text SERVER_COLUMN_FAMILY = new Text("srv");
-  public static final ColumnFQ DIRECTORY_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("dir"));
-  public static final ColumnFQ TIME_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("time"));
-  public static final ColumnFQ FLUSH_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("flush"));
-  public static final ColumnFQ COMPACT_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("compact"));
-  public static final ColumnFQ LOCK_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("lock"));
-  
-  public static final Text DATAFILE_COLUMN_FAMILY = new Text("file");
-  public static final Text SCANFILE_COLUMN_FAMILY = new Text("scan");
-  public static final Text LOG_COLUMN_FAMILY = new Text("log");
-  public static final Text CHOPPED_COLUMN_FAMILY = new Text("chopped");
-  public static final ColumnFQ CHOPPED_COLUMN = new ColumnFQ(CHOPPED_COLUMN_FAMILY, new Text("chopped"));
-  
-  public static class DataFileValue {
-    private long size;
-    private long numEntries;
-    private long time = -1;
-    
-    public DataFileValue(long size, long numEntries, long time) {
-      this.size = size;
-      this.numEntries = numEntries;
-      this.time = time;
-    }
-    
-    public DataFileValue(long size, long numEntries) {
-      this.size = size;
-      this.numEntries = numEntries;
-      this.time = -1;
-    }
-    
-    public DataFileValue(byte[] encodedDFV) {
-      String[] ba = new String(encodedDFV).split(",");
-      
-      size = Long.parseLong(ba[0]);
-      numEntries = Long.parseLong(ba[1]);
-      
-      if (ba.length == 3)
-        time = Long.parseLong(ba[2]);
-      else
-        time = -1;
-    }
-    
-    public long getSize() {
-      return size;
-    }
-    
-    public long getNumEntries() {
-      return numEntries;
-    }
-    
-    public boolean isTimeSet() {
-      return time >= 0;
-    }
-    
-    public long getTime() {
-      return time;
-    }
-    
-    public byte[] encode() {
-      if (time >= 0)
-        return ("" + size + "," + numEntries + "," + time).getBytes();
-      return ("" + size + "," + numEntries).getBytes();
-    }
-    
-    @Override
-    public boolean equals(Object o) {
-      if (o instanceof DataFileValue) {
-        DataFileValue odfv = (DataFileValue) o;
-        
-        return size == odfv.size && numEntries == odfv.numEntries;
-      }
-      
-      return false;
-    }
-    
-    @Override
-    public int hashCode() {
-      return Long.valueOf(size + numEntries).hashCode();
-    }
-    
-    @Override
-    public String toString() {
-      return size + " " + numEntries;
-    }
-    
-    public void setTime(long time) {
-      if (time < 0)
-        throw new IllegalArgumentException();
-      this.time = time;
-    }
-  }
-  
-  public static Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> getMetadataLocationEntries(SortedMap<Key,Value> entries) {
-    Key key;
-    Value val;
-    Text location = null;
-    Value prevRow = null;
-    KeyExtent ke;
-    
-    SortedMap<KeyExtent,Text> results = new TreeMap<KeyExtent,Text>();
-    ArrayList<KeyExtent> locationless = new ArrayList<KeyExtent>();
-    
-    Text lastRowFromKey = new Text();
-    
-    // text obj below is meant to be reused in loop for efficiency
-    Text colf = new Text();
-    Text colq = new Text();
-    
-    for (Entry<Key,Value> entry : entries.entrySet()) {
-      key = entry.getKey();
-      val = entry.getValue();
-      
-      if (key.compareRow(lastRowFromKey) != 0) {
-        prevRow = null;
-        location = null;
-        key.getRow(lastRowFromKey);
-      }
-      
-      colf = key.getColumnFamily(colf);
-      colq = key.getColumnQualifier(colq);
-      
-      // interpret the row id as a key extent
-      if (colf.equals(CURRENT_LOCATION_COLUMN_FAMILY) || colf.equals(FUTURE_LOCATION_COLUMN_FAMILY)) {
-        if (location != null) {
-          throw new IllegalStateException("Tablet has multiple locations : " + lastRowFromKey);
-        }
-        location = new Text(val.toString());
-      } else if (PREV_ROW_COLUMN.equals(colf, colq)) {
-        prevRow = new Value(val);
-      }
-      
-      if (prevRow != null) {
-        ke = new KeyExtent(key.getRow(), prevRow);
-        if (location != null)
-          results.put(ke, location);
-        else
-          locationless.add(ke);
-        
-        location = null;
-        prevRow = null;
-      }
-    }
-    
-    return new Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>>(results, locationless);
-  }
-  
-  public static SortedMap<Text,SortedMap<ColumnFQ,Value>> getTabletEntries(SortedMap<Key,Value> tabletKeyValues, List<ColumnFQ> columns) {
-    TreeMap<Text,SortedMap<ColumnFQ,Value>> tabletEntries = new TreeMap<Text,SortedMap<ColumnFQ,Value>>();
-    
-    HashSet<ColumnFQ> colSet = null;
-    if (columns != null) {
-      colSet = new HashSet<ColumnFQ>(columns);
-    }
-    
-    for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-      
-      if (columns != null && !colSet.contains(new ColumnFQ(entry.getKey()))) {
-        continue;
-      }
-      
-      Text row = entry.getKey().getRow();
-      
-      SortedMap<ColumnFQ,Value> colVals = tabletEntries.get(row);
-      if (colVals == null) {
-        colVals = new TreeMap<ColumnFQ,Value>();
-        tabletEntries.put(row, colVals);
-      }
-      
-      colVals.put(new ColumnFQ(entry.getKey()), entry.getValue());
-    }
-    
-    return tabletEntries;
-  }
-  
-  public static void getEntries(Instance instance, TCredentials credentials, String table, boolean isTid, Map<KeyExtent,String> locations,
-      SortedSet<KeyExtent> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    String tableId = isTid ? table : Tables.getNameToIdMap(instance).get(table);
-    
-    String systemTableToRead = tableId.equals(ID) ? RootTable.NAME : NAME;
-    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(systemTableToRead,
-        Authorizations.EMPTY);
-    
-    PREV_ROW_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(CURRENT_LOCATION_COLUMN_FAMILY);
-    
-    // position at first entry in metadata table for given table
-    KeyExtent ke = new KeyExtent(new Text(tableId), new Text(), null);
-    Key startKey = new Key(ke.getMetadataEntry());
-    ke = new KeyExtent(new Text(tableId), null, null);
-    Key endKey = new Key(ke.getMetadataEntry()).followingKey(PartialKey.ROW);
-    scanner.setRange(new Range(startKey, endKey));
-    
-    Text colf = new Text();
-    Text colq = new Text();
-    
-    KeyExtent currentKeyExtent = null;
-    String location = null;
-    Text row = null;
-    // acquire this tables METADATA table entries
-    boolean haveExtent = false;
-    boolean haveLocation = false;
-    for (Entry<Key,Value> entry : scanner) {
-      if (row != null) {
-        if (!row.equals(entry.getKey().getRow())) {
-          currentKeyExtent = null;
-          haveExtent = false;
-          haveLocation = false;
-          row = entry.getKey().getRow();
-        }
-      } else
-        row = entry.getKey().getRow();
-      
-      colf = entry.getKey().getColumnFamily(colf);
-      colq = entry.getKey().getColumnQualifier(colq);
-      
-      // stop scanning metadata table when another table is reached
-      if (!(new KeyExtent(entry.getKey().getRow(), (Text) null)).getTableId().toString().equals(tableId))
-        break;
-      
-      if (PREV_ROW_COLUMN.equals(colf, colq)) {
-        currentKeyExtent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
-        tablets.add(currentKeyExtent);
-        haveExtent = true;
-      } else if (colf.equals(CURRENT_LOCATION_COLUMN_FAMILY)) {
-        location = entry.getValue().toString();
-        haveLocation = true;
-      }
-      
-      if (haveExtent && haveLocation) {
-        locations.put(currentKeyExtent, location);
-        haveExtent = false;
-        haveLocation = false;
-        currentKeyExtent = null;
-      }
-    }
-    
-    validateEntries(tableId, tablets);
-  }
-  
-  public static void validateEntries(String tableId, SortedSet<KeyExtent> tablets) throws AccumuloException {
-    // sanity check of metadata table entries
-    // make sure tablets has no holes, and that it starts and ends w/ null
-    if (tablets.size() == 0)
-      throw new AccumuloException("No entries found in metadata table for table " + tableId);
-    
-    if (tablets.first().getPrevEndRow() != null)
-      throw new AccumuloException("Problem with metadata table, first entry for table " + tableId + "- " + tablets.first() + " - has non null prev end row");
-    
-    if (tablets.last().getEndRow() != null)
-      throw new AccumuloException("Problem with metadata table, last entry for table " + tableId + "- " + tablets.first() + " - has non null end row");
-    
-    Iterator<KeyExtent> tabIter = tablets.iterator();
-    Text lastEndRow = tabIter.next().getEndRow();
-    while (tabIter.hasNext()) {
-      KeyExtent tabke = tabIter.next();
-      
-      if (tabke.getPrevEndRow() == null)
-        throw new AccumuloException("Problem with metadata table, it has null prev end row in middle of table " + tabke);
-      
-      if (!tabke.getPrevEndRow().equals(lastEndRow))
-        throw new AccumuloException("Problem with metadata table, it has a hole " + tabke.getPrevEndRow() + " != " + lastEndRow);
-      
-      lastEndRow = tabke.getEndRow();
-    }
-    
-    // end METADATA table sanity check
-  }
-  
-  public static boolean isContiguousRange(KeyExtent ke, SortedSet<KeyExtent> children) {
-    if (children.size() == 0)
-      return false;
-    
-    if (children.size() == 1)
-      return children.first().equals(ke);
-    
-    Text per = children.first().getPrevEndRow();
-    Text er = children.last().getEndRow();
-    
-    boolean perEqual = (per == ke.getPrevEndRow() || per != null && ke.getPrevEndRow() != null && ke.getPrevEndRow().compareTo(per) == 0);
-    
-    boolean erEqual = (er == ke.getEndRow() || er != null && ke.getEndRow() != null && ke.getEndRow().compareTo(er) == 0);
-    
-    if (!perEqual || !erEqual)
-      return false;
-    
-    Iterator<KeyExtent> iter = children.iterator();
-    
-    Text lastEndRow = iter.next().getEndRow();
-    
-    while (iter.hasNext()) {
-      KeyExtent cke = iter.next();
-      
-      per = cke.getPrevEndRow();
-      
-      // something in the middle should not be null
-      
-      if (per == null || lastEndRow == null)
-        return false;
-      
-      if (per.compareTo(lastEndRow) != 0)
-        return false;
-      
-      lastEndRow = cke.getEndRow();
-    }
-    
-    return true;
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/RootTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/RootTable.java b/core/src/main/java/org/apache/accumulo/core/util/RootTable.java
deleted file mode 100644
index 1209110..0000000
--- a/core/src/main/java/org/apache/accumulo/core/util/RootTable.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.util;
-
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.hadoop.io.Text;
-
-/**
- * 
- */
-public class RootTable {
-  
-  public static final String ID = "!!R";
-  public static final String NAME = "!!ROOT";
-  
-  public static final String ROOT_TABLET_LOCATION = "/root_tablet";
-  
-  public static final String ZROOT_TABLET = ROOT_TABLET_LOCATION;
-  public static final String ZROOT_TABLET_LOCATION = ZROOT_TABLET + "/location";
-  public static final String ZROOT_TABLET_FUTURE_LOCATION = ZROOT_TABLET + "/future_location";
-  public static final String ZROOT_TABLET_LAST_LOCATION = ZROOT_TABLET + "/lastlocation";
-  public static final String ZROOT_TABLET_WALOGS = ZROOT_TABLET + "/walogs";
-  
-  public static final KeyExtent EXTENT = new KeyExtent(new Text(ID), null, null);
-  public static final Range METADATA_TABLETS_RANGE = new Range(null, false, MetadataTable.RESERVED_RANGE_START_KEY, false);
-  
-}
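
Both deletions above are relocations rather than removals; the new homes, as used by the call sites updated elsewhere in this patch, are:

  org.apache.accumulo.core.util.MetadataTable              -> org.apache.accumulo.core.metadata.MetadataTable (ID, NAME)
  org.apache.accumulo.core.util.RootTable                  -> org.apache.accumulo.core.metadata.RootTable
  MetadataTable.DataFileValue                              -> org.apache.accumulo.core.metadata.schema.DataFileValue
  MetadataTable.PREV_ROW_COLUMN (and other tablet columns) -> MetadataSchema.TabletsSection.TabletColumnFamily.*
  MetadataTable.DIRECTORY/TIME/FLUSH/COMPACT/LOCK columns  -> MetadataSchema.TabletsSection.ServerColumnFamily.*
  MetadataTable.DATAFILE_COLUMN_FAMILY                     -> MetadataSchema.TabletsSection.DataFileColumnFamily.NAME
  MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY             -> MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME
  MetadataTable.getMetadataLocationEntries(...)            -> MetadataLocationObtainer.getMetadataLocationEntries(...)
  MetadataTable.getEntries(...) / validateEntries(...)     -> MetadataServicer.getTabletLocations(...), which validates internally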

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
index 17e220b..6bfdce2 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.shell.commands;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
index 469d91b..4093fa4 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
@@ -30,9 +30,10 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.format.BinaryFormatter;
 import org.apache.accumulo.core.util.shell.Shell;
@@ -72,14 +73,14 @@ public class GetSplitsCommand extends Command {
       } else {
         String systemTableToCheck = MetadataTable.NAME.equals(tableName) ? RootTable.NAME : MetadataTable.NAME;
         final Scanner scanner = shellState.getConnector().createScanner(systemTableToCheck, Authorizations.EMPTY);
-        MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
         final Text start = new Text(shellState.getConnector().tableOperations().tableIdMap().get(tableName));
         final Text end = new Text(start);
         end.append(new byte[] {'<'}, 0, 1);
         scanner.setRange(new Range(start, end));
         for (Iterator<Entry<Key,Value>> iterator = scanner.iterator(); iterator.hasNext();) {
           final Entry<Key,Value> next = iterator.next();
-          if (MetadataTable.PREV_ROW_COLUMN.hasColumns(next.getKey())) {
+          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(next.getKey())) {
             KeyExtent extent = new KeyExtent(next.getKey().getRow(), next.getValue());
             final String pr = encode(encode, extent.getPrevEndRow());
             final String er = encode(encode, extent.getEndRow());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
index 0365a39..70de3d4 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.shell.commands;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 
 public class OfflineCommand extends TableOperation {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
index c6a2eff..5ffbe3a 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.shell.commands;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 
 public class OnlineCommand extends TableOperation {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
index 7ffec0f..f160cb3 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
@@ -47,10 +47,12 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataLocationObtainer;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.hadoop.io.Text;
 
 public class TabletLocatorImplTest extends TestCase {
@@ -454,7 +456,7 @@ public class TabletLocatorImplTest extends TestCase {
     public Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException {
       return getConnector(auth.user, auth.getPassword());
     }
-
+    
     @Override
     public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
       throw new UnsupportedOperationException();
@@ -474,7 +476,8 @@ public class TabletLocatorImplTest extends TestCase {
     }
     
     @Override
-    public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials) throws AccumuloSecurityException {
+    public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials)
+        throws AccumuloSecurityException {
       
       // System.out.println("lookupTablet("+src+","+row+","+stopRow+","+ parent+")");
       // System.out.println(tservers);
@@ -503,7 +506,7 @@ public class TabletLocatorImplTest extends TestCase {
       
       SortedMap<Key,Value> results = tabletData.tailMap(startKey).headMap(stopKey);
       
-      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataTable.getMetadataLocationEntries(results);
+      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results);
       
       for (Entry<KeyExtent,Text> entry : metadata.getFirst().entrySet()) {
         list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
@@ -559,7 +562,7 @@ public class TabletLocatorImplTest extends TestCase {
       if (failures.size() > 0)
         parent.invalidateCache(failures);
       
-      SortedMap<KeyExtent,Text> metadata = MetadataTable.getMetadataLocationEntries(results).getFirst();
+      SortedMap<KeyExtent,Text> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results).getFirst();
       
       for (Entry<KeyExtent,Text> entry : metadata.entrySet()) {
         list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
@@ -606,18 +609,19 @@ public class TabletLocatorImplTest extends TestCase {
     if (location != null) {
       if (instance == null)
         instance = "";
-      Key lk = new Key(mr, MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text(instance));
+      Key lk = new Key(mr, TabletsSection.CurrentLocationColumnFamily.NAME, new Text(instance));
       tabletData.put(lk, new Value(location.getBytes()));
     }
     
-    Key pk = new Key(mr, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(), MetadataTable.PREV_ROW_COLUMN.getColumnQualifier());
+    Key pk = new Key(mr, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier());
     tabletData.put(pk, per);
   }
   
   static void setLocation(TServers tservers, String server, KeyExtent tablet, KeyExtent ke, String location) {
     setLocation(tservers, server, tablet, ke, location, "");
   }
-
+  
   static void deleteServer(TServers tservers, String server) {
     tservers.tservers.remove(server);
     
@@ -1274,7 +1278,6 @@ public class TabletLocatorImplTest extends TestCase {
     } catch (Exception e) {
       
     }
-
-
+    
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
new file mode 100644
index 0000000..63fe434
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.junit.Test;
+
+public class MetadataServicerTest {
+  
+  @Test
+  public void checkSystemTableIdentifiers() {
+    assertNotEquals(RootTable.ID, MetadataTable.ID);
+    assertNotEquals(RootTable.NAME, MetadataTable.NAME);
+  }
+  
+  @Test
+  public void testGetCorrectServicer() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
+    String userTableName = "A";
+    MockInstance instance = new MockInstance("metadataTest");
+    Connector connector = instance.getConnector("root", new PasswordToken(""));
+    connector.tableOperations().create(userTableName);
+    String userTableId = connector.tableOperations().tableIdMap().get(userTableName);
+    TCredentials credentials = CredentialHelper.createSquelchError("root", new PasswordToken(""), instance.getInstanceID());
+    
+    MetadataServicer ms = MetadataServicer.forTableId(instance, credentials, RootTable.ID);
+    assertTrue(ms instanceof ServicerForRootTable);
+    assertFalse(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableId(instance, credentials, MetadataTable.ID);
+    assertTrue(ms instanceof ServicerForMetadataTable);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(MetadataTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableId(instance, credentials, userTableId);
+    assertTrue(ms instanceof ServicerForUserTables);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(MetadataTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(userTableId, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, RootTable.NAME);
+    assertTrue(ms instanceof ServicerForRootTable);
+    assertFalse(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, MetadataTable.NAME);
+    assertTrue(ms instanceof ServicerForMetadataTable);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(MetadataTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, userTableName);
+    assertTrue(ms instanceof ServicerForUserTables);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(MetadataTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(userTableId, ms.getServicedTableId());
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java b/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java
deleted file mode 100644
index 7b942bf..0000000
--- a/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.util;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MetadataTableTest {
-  
-  @Test
-  public void checkSystemTableIdentifiers() {
-    assertNotEquals(RootTable.ID, MetadataTable.ID);
-    assertNotEquals(RootTable.NAME, MetadataTable.NAME);
-  }
-  
-  private KeyExtent createKeyExtent(String tname, String er, String per) {
-    return new KeyExtent(new Text(tname), er == null ? null : new Text(er), per == null ? null : new Text(per));
-  }
-  
-  private SortedSet<KeyExtent> createKeyExtents(String data[][]) {
-    
-    TreeSet<KeyExtent> extents = new TreeSet<KeyExtent>();
-    for (String[] exdata : data) {
-      extents.add(createKeyExtent(exdata[0], exdata[1], exdata[2]));
-    }
-    
-    return extents;
-  }
-  
-  private void runTest(String beginRange, String endRange) {
-    KeyExtent ke = createKeyExtent("foo", endRange, beginRange);
-    
-    SortedSet<KeyExtent> children = createKeyExtents(new String[][] {new String[] {"foo", endRange, beginRange}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", endRange, "r1"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", endRange, "r2"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", (endRange == null ? "r2" : endRange + "Z"), "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", (beginRange == null ? "r0" : "a" + beginRange)},
-        new String[] {"foo", endRange, "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", endRange, "r2"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", endRange, "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", null}, new String[] {"foo", endRange, "r2"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    if (endRange == null) {
-      children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", null, "r1"},
-          new String[] {"foo", endRange, "r2"}});
-      
-      assertFalse(MetadataTable.isContiguousRange(ke, children));
-    }
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", "r3", "r2"},
-        new String[] {"foo", endRange, "r3"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-  }
-  
-  @Test
-  public void testICR1() {
-    runTest(null, null);
-    runTest(null, "r4");
-    runTest("r0", null);
-    runTest("r0", "r4");
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
----------------------------------------------------------------------
diff --git a/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java b/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
index c70e110..b98cf31 100644
--- a/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
+++ b/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
@@ -50,7 +50,7 @@ import org.apache.accumulo.core.iterators.DevNull;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.user.SummingCombiner;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
@@ -147,7 +147,6 @@ public class SimpleTest {
     props.put("tokenClass", PasswordToken.class.getName());
     
     protocolClass = getRandomProtocol();
-    System.out.println(protocolClass.getName());
     
     proxyPort = PortUtils.getRandomFreePort();
     proxyServer = Proxy.createProxyServer(org.apache.accumulo.proxy.thrift.AccumuloProxy.class, org.apache.accumulo.proxy.ProxyServer.class, proxyPort,
@@ -1001,14 +1000,14 @@ public class SimpleTest {
     client.closeScanner(scanner);
     assertEquals(10, more.getResults().size());
     client.deleteTable(creds, "test2");
-
+    
     // don't know how to test this, call it just for fun
     client.clearLocatorCache(creds, TABLE_TEST);
-
+    
     // compact
     client.compactTable(creds, TABLE_TEST, null, null, null, true, true);
     assertEquals(1, countFiles(TABLE_TEST));
-
+    
     // get disk usage
     client.cloneTable(creds, TABLE_TEST, "test2", true, null, null);
     Set<String> tablesToScan = new HashSet<String>();
@@ -1028,7 +1027,7 @@ public class SimpleTest {
     assertEquals(1, diskUsage.get(2).getTables().size());
     client.deleteTable(creds, "foo");
     client.deleteTable(creds, "test2");
-
+    
     // export/import
     String dir = folder.getRoot() + "/test";
     String destDir = folder.getRoot() + "/test_dest";

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/ServerConstants.java b/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
index 699fdd8..3765cce 100644
--- a/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@ -17,9 +17,9 @@
 package org.apache.accumulo.server;
 
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -28,8 +28,12 @@ public class ServerConstants {
   
   // versions should never be negative
   public static final Integer WIRE_VERSION = 2;
-  public static final int DATA_VERSION = 5;
-  public static final int PREV_DATA_VERSION = 4;
+  
+  /**
+   * current version reflects the addition of a separate root table (ACCUMULO-1481)
+   */
+  public static final int DATA_VERSION = 6;
+  public static final int PREV_DATA_VERSION = 5;
   
   // these are functions to delay loading the Accumulo configuration unless we must
   public static String[] getBaseDirs() {
@@ -92,6 +96,6 @@ public class ServerConstants {
   }
   
   public static String getRootTabletDir() {
-    return prefix(getRootTableDirs(), RootTable.ZROOT_TABLET)[0];
+    return prefix(getRootTableDirs(), RootTable.ROOT_TABLET_LOCATION)[0];
   }
 }
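
The javadoc above is the point of the bump: a server process can compare the version persisted on disk against these constants to decide whether it may run as-is or must upgrade first. A rough sketch of that kind of check (illustrative only; persistentVersion, readVersionFromInstanceDirectory() and upgradeInstance() are hypothetical placeholders, and the real upgrade path lives elsewhere in the server):

    int persistentVersion = readVersionFromInstanceDirectory(); // hypothetical helper
    if (persistentVersion != ServerConstants.DATA_VERSION) {
      if (persistentVersion == ServerConstants.PREV_DATA_VERSION) {
        // an in-place upgrade is possible, e.g. carving the root tablet's metadata
        // out of !METADATA into the new separate root table
        upgradeInstance(); // hypothetical hook
      } else {
        throw new RuntimeException("Unsupported data version: " + persistentVersion);
      }
    }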

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 0c647ea..b1bb894 100644
--- a/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -51,11 +51,11 @@ import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.FileUtil;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LoggingRunnable;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.StopWatch;
 import org.apache.accumulo.core.util.ThriftUtil;
@@ -132,7 +132,7 @@ public class BulkImporter {
     }
     
     ClientService.Client client = null;
-    final TabletLocator locator = TabletLocator.getInstance(instance, new Text(tableId));
+    final TabletLocator locator = TabletLocator.getLocator(instance, new Text(tableId));
     
     try {
       final Map<Path,List<TabletLocation>> assignments = Collections.synchronizedSortedMap(new TreeMap<Path,List<TabletLocation>>());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index 7a50dae..db5ece0 100644
--- a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@ -32,11 +32,11 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.OpTimer;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index 03cbde3..ce5e5e4 100644
--- a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -28,9 +28,15 @@ import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.accumulo.server.client.HdfsZooInstance;
@@ -55,14 +61,15 @@ public class MetadataConstraints implements Constraint {
     }
   }
   
-  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {MetadataTable.PREV_ROW_COLUMN,
-      MetadataTable.OLD_PREV_ROW_COLUMN, MetadataTable.DIRECTORY_COLUMN, MetadataTable.SPLIT_RATIO_COLUMN, MetadataTable.TIME_COLUMN,
-      MetadataTable.LOCK_COLUMN, MetadataTable.FLUSH_COLUMN, MetadataTable.COMPACT_COLUMN}));
+  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN,
+      TabletsSection.ServerColumnFamily.LOCK_COLUMN, TabletsSection.ServerColumnFamily.FLUSH_COLUMN, TabletsSection.ServerColumnFamily.COMPACT_COLUMN}));
   
-  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {MetadataTable.BULKFILE_COLUMN_FAMILY,
-      MetadataTable.LOG_COLUMN_FAMILY, MetadataTable.SCANFILE_COLUMN_FAMILY, MetadataTable.DATAFILE_COLUMN_FAMILY,
-      MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, MetadataTable.LAST_LOCATION_COLUMN_FAMILY, MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY,
-      MetadataTable.CHOPPED_COLUMN_FAMILY, MetadataTable.CLONED_COLUMN_FAMILY}));
+  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {TabletsSection.BulkFileColumnFamily.NAME,
+      LogColumnFamily.NAME, ScanFileColumnFamily.NAME, DataFileColumnFamily.NAME,
+      TabletsSection.CurrentLocationColumnFamily.NAME, TabletsSection.LastLocationColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME,
+      ChoppedColumnFamily.NAME, ClonedColumnFamily.NAME}));
   
   private static boolean isValidColumn(ColumnUpdate cu) {
     
@@ -78,19 +85,20 @@ public class MetadataConstraints implements Constraint {
   static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
     if (lst == null)
       lst = new ArrayList<Short>();
-    lst.add((short)violation);
+    lst.add((short) violation);
     return lst;
   }
   
   static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
     if (lst == null)
       return addViolation(lst, intViolation);
-    short violation = (short)intViolation;
+    short violation = (short) intViolation;
     if (!lst.contains(violation))
       return addViolation(lst, intViolation);
     return lst;
   }
   
+  @Override
   public List<Short> check(Environment env, Mutation mutation) {
     
     ArrayList<Short> violations = null;
@@ -144,7 +152,7 @@ public class MetadataConstraints implements Constraint {
     }
     
     boolean checkedBulk = false;
-
+    
     for (ColumnUpdate columnUpdate : colUpdates) {
       Text columnFamily = new Text(columnUpdate.getColumnFamily());
       
@@ -155,11 +163,11 @@ public class MetadataConstraints implements Constraint {
         continue;
       }
       
-      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) {
         violations = addViolation(violations, 6);
       }
       
-      if (columnFamily.equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (columnFamily.equals(DataFileColumnFamily.NAME)) {
         try {
           DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
           
@@ -171,33 +179,33 @@ public class MetadataConstraints implements Constraint {
         } catch (ArrayIndexOutOfBoundsException aiooe) {
           violations = addViolation(violations, 1);
         }
-      } else if (columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) {
         
-      } else if (columnFamily.equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) {
         if (!columnUpdate.isDeleted() && !checkedBulk) {
           // splits, which also write the time reference, are allowed to write this reference even when
           // the transaction is not running because the other half of the tablet is holding a reference
           // to the file.
           boolean isSplitMutation = false;
-          // When a tablet is assigned, it re-writes the metadata.  It should probably only update the location information, 
-          // but it writes everything.  We allow it to re-write the bulk information if it is setting the location. 
-          // See ACCUMULO-1230. 
+          // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
+          // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
+          // See ACCUMULO-1230.
           boolean isLocationMutation = false;
           
           HashSet<Text> dataFiles = new HashSet<Text>();
           HashSet<Text> loadedFiles = new HashSet<Text>();
-
+          
           String tidString = new String(columnUpdate.getValue());
           int otherTidCount = 0;
-
+          
           for (ColumnUpdate update : mutation.getUpdates()) {
-            if (new ColumnFQ(update).equals(MetadataTable.DIRECTORY_COLUMN)) {
+            if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) {
               isSplitMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
               isLocationMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
               dataFiles.add(new Text(update.getColumnQualifier()));
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.BulkFileColumnFamily.NAME)) {
               loadedFiles.add(new Text(update.getColumnQualifier()));
               
               if (!new String(update.getValue()).equals(tidString)) {
@@ -223,7 +231,7 @@ public class MetadataConstraints implements Constraint {
       } else {
         if (!isValidColumn(columnUpdate)) {
           violations = addViolation(violations, 2);
-        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
+        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
             && (violations == null || !violations.contains((short) 4))) {
           KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
           
@@ -234,7 +242,7 @@ public class MetadataConstraints implements Constraint {
           if (!prevEndRowLessThanEndRow) {
             violations = addViolation(violations, 3);
           }
-        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.LOCK_COLUMN)) {
+        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) {
           if (zooCache == null) {
             zooCache = new ZooCache();
           }
@@ -263,7 +271,8 @@ public class MetadataConstraints implements Constraint {
     if (violations != null) {
       log.debug("violating metadata mutation : " + new String(mutation.getRow()));
       for (ColumnUpdate update : mutation.getUpdates()) {
-        log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
+        log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value "
+            + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
       }
     }
     
@@ -273,7 +282,8 @@ public class MetadataConstraints implements Constraint {
   protected Arbitrator getArbitrator() {
     return new ZooArbitrator();
   }
-
+  
+  @Override
   public String getViolationDescription(short violationCode) {
     switch (violationCode) {
       case 1:
@@ -296,6 +306,7 @@ public class MetadataConstraints implements Constraint {
     return null;
   }
   
+  @Override
   protected void finalize() {
     if (zooCache != null)
       zooCache.clear();
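
To make the relocated schema constants concrete, a rough sketch of the kind of tablet mutation they describe (illustrative only, not from this patch; it assumes the ColumnFQ.put(Mutation, Value) helper and the DataFileValue(size, numEntries) constructor referenced above, plus the Mutation/Value/Text imports already in this file):

    // Metadata row for a tablet of table id "2" whose end row is "m"
    Mutation m = new Mutation(new Text("2;m"));
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t-0001".getBytes()));
    // One data file entry: family from DataFileColumnFamily, qualifier is the file path,
    // value is an encoded DataFileValue (estimated size in bytes, estimated entry count)
    m.put(DataFileColumnFamily.NAME, new Text("/t-0001/F0000.rf"),
        new Value(new DataFileValue(1024, 100).encode()));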

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
index d88a85c..d50cff2 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
@@ -42,8 +42,8 @@ import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.AddressUtil;
-import org.apache.accumulo.server.util.MetadataTable;
-import org.apache.accumulo.server.util.MetadataTable.LogEntry;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.trace.instrument.Span;
 import org.apache.accumulo.trace.instrument.Trace;
@@ -223,7 +223,7 @@ public class GarbageCollectWriteAheadLogs {
   private static int removeMetadataEntries(Map<Path,String> fileToServerMap, Set<Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
       InterruptedException {
     int count = 0;
-    Iterator<LogEntry> iterator = MetadataTable.getLogEntries(SecurityConstants.getSystemCredentials());
+    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(SecurityConstants.getSystemCredentials());
     while (iterator.hasNext()) {
       for (String filename : iterator.next().logSet) {
         Path path;