Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/07/03 20:32:53 UTC

svn commit: r1499510 [2/7] - in /accumulo/trunk: core/src/main/java/org/apache/accumulo/core/client/ core/src/main/java/org/apache/accumulo/core/client/admin/ core/src/main/java/org/apache/accumulo/core/client/impl/ core/src/main/java/org/apache/accumu...

Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java?rev=1499510&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java Wed Jul  3 18:32:51 2013
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+
+/**
+ * A metadata servicer for user tables.<br />
+ * Metadata for user tables is serviced in the metadata table.
+ */
+class ServicerForUserTables extends TableMetadataServicer {
+  
+  public ServicerForUserTables(Instance instance, TCredentials credentials, String tableId) {
+    super(instance, credentials, MetadataTable.NAME, tableId);
+  }
+  
+}

Propchange: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java?rev=1499510&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java Wed Jul  3 18:32:51 2013
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.SortedSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.hadoop.io.Text;
+
+/**
+ * A {@link MetadataServicer} that is backed by a table
+ */
+abstract class TableMetadataServicer extends MetadataServicer {
+  
+  private Instance instance;
+  private TCredentials credentials;
+  private String tableIdBeingServiced;
+  private String serviceTableName;
+  
+  public TableMetadataServicer(Instance instance, TCredentials credentials, String serviceTableName, String tableIdBeingServiced) {
+    this.instance = instance;
+    this.credentials = credentials;
+    this.serviceTableName = serviceTableName;
+    this.tableIdBeingServiced = tableIdBeingServiced;
+  }
+  
+  @Override
+  public String getServicedTableId() {
+    return tableIdBeingServiced;
+  }
+  
+  public String getServicingTableName() {
+    return serviceTableName;
+  }
+  
+  @Override
+  public void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    
+    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(getServicingTableName(),
+        Authorizations.EMPTY);
+    
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    
+    // position at first entry in metadata table for given table
+    scanner.setRange(TabletsSection.getRange(getServicedTableId()));
+    
+    Text colf = new Text();
+    Text colq = new Text();
+    
+    KeyExtent currentKeyExtent = null;
+    String location = null;
+    Text row = null;
+    // acquire this table's tablets from the metadata table which services it
+    for (Entry<Key,Value> entry : scanner) {
+      if (row != null) {
+        if (!row.equals(entry.getKey().getRow())) {
+          currentKeyExtent = null;
+          location = null;
+          row = entry.getKey().getRow();
+        }
+      } else {
+        row = entry.getKey().getRow();
+      }
+      
+      colf = entry.getKey().getColumnFamily(colf);
+      colq = entry.getKey().getColumnQualifier(colq);
+      
+      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq)) {
+        currentKeyExtent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
+        tablets.put(currentKeyExtent, location);
+        currentKeyExtent = null;
+        location = null;
+      } else if (colf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
+        location = entry.getValue().toString();
+      }
+      
+    }
+    
+    validateEntries(tablets);
+  }
+  
+  private void validateEntries(SortedMap<KeyExtent,String> tablets) throws AccumuloException {
+    SortedSet<KeyExtent> tabletsKeys = (SortedSet<KeyExtent>) tablets.keySet();
+    // sanity check of metadata table entries
+    // make sure tablets has no holes, and that it starts and ends w/ null
+    if (tabletsKeys.size() == 0)
+      throw new AccumuloException("No entries found in metadata table for table " + getServicedTableId());
+    
+    if (tabletsKeys.first().getPrevEndRow() != null)
+      throw new AccumuloException("Problem with metadata table, first entry for table " + getServicedTableId() + " - " + tabletsKeys.first()
+          + " - has non null prev end row");
+    
+    if (tabletsKeys.last().getEndRow() != null)
+      throw new AccumuloException("Problem with metadata table, last entry for table " + getServicedTableId() + " - " + tabletsKeys.last()
+          + " - has non null end row");
+    
+    Iterator<KeyExtent> tabIter = tabletsKeys.iterator();
+    Text lastEndRow = tabIter.next().getEndRow();
+    while (tabIter.hasNext()) {
+      KeyExtent tabke = tabIter.next();
+      
+      if (tabke.getPrevEndRow() == null)
+        throw new AccumuloException("Problem with metadata table, it has null prev end row in middle of table " + tabke);
+      
+      if (!tabke.getPrevEndRow().equals(lastEndRow))
+        throw new AccumuloException("Problem with metadata table, it has a hole " + tabke.getPrevEndRow() + " != " + lastEndRow);
+      
+      lastEndRow = tabke.getEndRow();
+    }
+    
+    // end METADATA table sanity check
+  }
+  
+}

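For orientation, a minimal usage sketch of the servicer API added above, assuming an Instance, TCredentials, and table id are already available. The factory call mirrors MetadataServicerTest further down in this commit; the class and method names here are illustrative only, and the sketch lives in the same package because the servicer classes are package-private.

package org.apache.accumulo.core.metadata;

import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.security.thrift.TCredentials;

// Sketch only: prints each tablet of the given table and its current location.
// A null location means the tablet currently has no assigned tablet server.
class TabletLocationsSketch {
  static void printLocations(Instance instance, TCredentials credentials, String tableId) throws Exception {
    SortedMap<KeyExtent,String> tablets = new TreeMap<KeyExtent,String>();
    MetadataServicer.forTableId(instance, credentials, tableId).getTabletLocations(tablets);
    for (Entry<KeyExtent,String> entry : tablets.entrySet())
      System.out.println(entry.getKey() + " -> " + entry.getValue());
  }
}
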
Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java?rev=1499510&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java Wed Jul  3 18:32:51 2013
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata.schema;
+
+public class DataFileValue {
+  private long size;
+  private long numEntries;
+  private long time = -1;
+  
+  public DataFileValue(long size, long numEntries, long time) {
+    this.size = size;
+    this.numEntries = numEntries;
+    this.time = time;
+  }
+  
+  public DataFileValue(long size, long numEntries) {
+    this.size = size;
+    this.numEntries = numEntries;
+    this.time = -1;
+  }
+  
+  public DataFileValue(byte[] encodedDFV) {
+    String[] ba = new String(encodedDFV).split(",");
+    
+    size = Long.parseLong(ba[0]);
+    numEntries = Long.parseLong(ba[1]);
+    
+    if (ba.length == 3)
+      time = Long.parseLong(ba[2]);
+    else
+      time = -1;
+  }
+  
+  public long getSize() {
+    return size;
+  }
+  
+  public long getNumEntries() {
+    return numEntries;
+  }
+  
+  public boolean isTimeSet() {
+    return time >= 0;
+  }
+  
+  public long getTime() {
+    return time;
+  }
+  
+  public byte[] encode() {
+    if (time >= 0)
+      return ("" + size + "," + numEntries + "," + time).getBytes();
+    return ("" + size + "," + numEntries).getBytes();
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    if (o instanceof DataFileValue) {
+      DataFileValue odfv = (DataFileValue) o;
+      
+      return size == odfv.size && numEntries == odfv.numEntries;
+    }
+    
+    return false;
+  }
+  
+  @Override
+  public int hashCode() {
+    return Long.valueOf(size + numEntries).hashCode();
+  }
+  
+  @Override
+  public String toString() {
+    return size + " " + numEntries;
+  }
+  
+  public void setTime(long time) {
+    if (time < 0)
+      throw new IllegalArgumentException();
+    this.time = time;
+  }
+}

Propchange: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

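The encoding used by the class above is just "size,numEntries" with an optional ",time" suffix. A short round-trip illustration of the API as added (the values are arbitrary):

import org.apache.accumulo.core.metadata.schema.DataFileValue;

class DataFileValueSketch {
  public static void main(String[] args) {
    // 1048576 bytes, 5000 entries, no time set
    DataFileValue dfv = new DataFileValue(1048576, 5000);
    System.out.println(new String(dfv.encode()));   // prints: 1048576,5000

    dfv.setTime(42);
    System.out.println(new String(dfv.encode()));   // prints: 1048576,5000,42

    // Decoding parses the same comma-separated form; a missing third field leaves time unset.
    DataFileValue decoded = new DataFileValue("1048576,5000".getBytes());
    System.out.println(decoded.getSize() + " " + decoded.getNumEntries() + " " + decoded.isTimeSet());
    // prints: 1048576 5000 false
  }
}
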
Added: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java?rev=1499510&view=auto
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java (added)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java Wed Jul  3 18:32:51 2013
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata.schema;
+
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.hadoop.io.Text;
+
+/**
+ * Describes the table schema used for metadata tables
+ */
+public class MetadataSchema {
+  
+  private static final String RESERVED_PREFIX = "~";
+  
+  private static class Section {
+    private String rowPrefix;
+    private Range range;
+    
+    private Section(String startRow, boolean startInclusive, String endRow, boolean endInclusive) {
+      rowPrefix = startRow;
+      range = new Range(startRow, startInclusive, endRow, endInclusive);
+    }
+  }
+  
+  /**
+   * Used for storing information about tablets
+   */
+  public static class TabletsSection {
+    private static final Section section = new Section(null, false, RESERVED_PREFIX, false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static Range getRange(String tableId) {
+      return new Range(new Key(tableId + ';'), true, new Key(tableId + '<').followingKey(PartialKey.ROW), false);
+    }
+    
+    public static Text getRow(Text tableId, Text endRow) {
+      Text entry = new Text(tableId);
+      
+      if (endRow == null) {
+        // append delimiter for default tablet
+        entry.append(new byte[] {'<'}, 0, 1);
+      } else {
+        // append delimiter for regular tablets
+        entry.append(new byte[] {';'}, 0, 1);
+        entry.append(endRow.getBytes(), 0, endRow.getLength());
+      }
+      
+      return entry;
+    }
+    
+    /**
+     * Column family for storing the tablet information needed by clients
+     */
+    public static class TabletColumnFamily {
+      /**
+       * This column family must sort after all other column families for a tablet, because it holds {@link #PREV_ROW_COLUMN} and the
+       * {@link SimpleGarbageCollector} relies on the prev row entry sorting last for that tablet.
+       */
+      public static final Text NAME = new Text("~tab");
+      /**
+       * README: it is very important that the prev row entry sorts after everything else for its tablet, to avoid race conditions between the garbage
+       * collector and splits.
+       */
+      public static final ColumnFQ PREV_ROW_COLUMN = new ColumnFQ(NAME, new Text("~pr"));
+      /**
+       * A temporary field in case a split fails and we need to roll back
+       */
+      public static final ColumnFQ OLD_PREV_ROW_COLUMN = new ColumnFQ(NAME, new Text("oldprevrow"));
+      /**
+       * A temporary field for splits to optimize certain operations
+       */
+      public static final ColumnFQ SPLIT_RATIO_COLUMN = new ColumnFQ(NAME, new Text("splitRatio"));
+    }
+    
+    /**
+     * Column family for recording information used by the TServer
+     */
+    public static class ServerColumnFamily {
+      public static final Text NAME = new Text("srv");
+      /**
+       * Holds the location of the tablet in the DFS file system
+       */
+      public static final ColumnFQ DIRECTORY_COLUMN = new ColumnFQ(NAME, new Text("dir"));
+      /**
+       * Holds the {@link TimeType}
+       */
+      public static final ColumnFQ TIME_COLUMN = new ColumnFQ(NAME, new Text("time"));
+      /**
+       * Holds flush IDs to enable waiting on a flush to complete
+       */
+      public static final ColumnFQ FLUSH_COLUMN = new ColumnFQ(NAME, new Text("flush"));
+      /**
+       * Holds compact IDs to enable waiting on a compaction to complete
+       */
+      public static final ColumnFQ COMPACT_COLUMN = new ColumnFQ(NAME, new Text("compact"));
+      /**
+       * Holds lock IDs to enable a sanity check to ensure that the TServer writing to the metadata tablet is not dead
+       */
+      public static final ColumnFQ LOCK_COLUMN = new ColumnFQ(NAME, new Text("lock"));
+    }
+    
+    /**
+     * Column family for storing entries created by the TServer to indicate it has loaded a tablet that it was assigned
+     */
+    public static class CurrentLocationColumnFamily {
+      public static final Text NAME = new Text("loc");
+    }
+    
+    /**
+     * Column family for storing the assigned location
+     */
+    public static class FutureLocationColumnFamily {
+      public static final Text NAME = new Text("future");
+    }
+    
+    /**
+     * Column family for storing last location, as a hint for assignment
+     */
+    public static class LastLocationColumnFamily {
+      public static final Text NAME = new Text("last");
+    }
+    
+    /**
+     * Temporary markers that indicate a tablet loaded a bulk file
+     */
+    public static class BulkFileColumnFamily {
+      public static final Text NAME = new Text("loaded");
+    }
+    
+    /**
+     * Temporary marker that indicates a tablet was successfully cloned
+     */
+    public static class ClonedColumnFamily {
+      public static final Text NAME = new Text("!cloned");
+    }
+    
+    /**
+     * Column family for storing files used by a tablet
+     */
+    public static class DataFileColumnFamily {
+      public static final Text NAME = new Text("file");
+    }
+    
+    /**
+     * Column family for storing the set of files scanned with an isolated scanner, to prevent them from being deleted
+     */
+    public static class ScanFileColumnFamily {
+      public static final Text NAME = new Text("scan");
+    }
+    
+    /**
+     * Column family for storing write-ahead log entries
+     */
+    public static class LogColumnFamily {
+      public static final Text NAME = new Text("log");
+    }
+    
+    /**
+     * Column family for indicating that the files in a tablet have been trimmed to only include data for the current tablet, so that they are safe to merge
+     */
+    public static class ChoppedColumnFamily {
+      public static final Text NAME = new Text("chopped");
+      public static final ColumnFQ CHOPPED_COLUMN = new ColumnFQ(NAME, new Text("chopped"));
+    }
+  }
+  
+  /**
+   * Contains additional metadata in a reserved area not for tablets
+   */
+  public static class ReservedSection {
+    private static final Section section = new Section(RESERVED_PREFIX, true, null, false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+  /**
+   * Holds delete markers for potentially unused files/directories
+   */
+  public static class DeletesSection {
+    private static final Section section = new Section(RESERVED_PREFIX + "del", true, RESERVED_PREFIX + "dem", false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+  /**
+   * Holds bulk-load-in-progress processing flags
+   */
+  public static class BlipSection {
+    private static final Section section = new Section(RESERVED_PREFIX + "blip", true, RESERVED_PREFIX + "bliq", false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+}

Propchange: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain

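To make the row layout above concrete: TabletsSection.getRow() encodes a tablet's metadata row as the table id followed by ';' and the end row, or the table id followed by '<' for the default (last) tablet, and getRange(tableId) spans exactly that band. A small illustration, using a hypothetical table id "2":

import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.hadoop.io.Text;

class MetadataRowSketch {
  public static void main(String[] args) {
    Text tableId = new Text("2");

    // Tablet whose end row is "m" -> metadata row "2;m"
    System.out.println(TabletsSection.getRow(tableId, new Text("m")));

    // The default (last) tablet has a null end row -> metadata row "2<"
    System.out.println(TabletsSection.getRow(tableId, null));

    // All tablet entries for table "2": rows from "2;" (inclusive) up to just past "2<" (exclusive)
    Range allTablets = TabletsSection.getRange("2");
    System.out.println(allTablets);
  }
}
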
Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java Wed Jul  3 18:32:51 2013
@@ -69,30 +69,6 @@ public class ColumnFQ implements Compara
     m.putDelete(colf, colq);
   }
   
-  /**
-   * @deprecated since 1.5, use {@link #fetch(ScannerBase)} instead
-   */
-  @Deprecated
-  public static void fetch(ScannerBase sb, ColumnFQ cfq) {
-    sb.fetchColumn(cfq.colf, cfq.colq);
-  }
-  
-  /**
-   * @deprecated since 1.5, use {@link #put(Mutation, Value)} instead
-   */
-  @Deprecated
-  public static void put(Mutation m, ColumnFQ cfq, Value v) {
-    m.put(cfq.colf, cfq.colq, v);
-  }
-  
-  /**
-   * @deprecated since 1.5, use {@link #putDelete(Mutation)} instead
-   */
-  @Deprecated
-  public static void putDelete(Mutation m, ColumnFQ cfq) {
-    m.putDelete(cfq.colf, cfq.colq);
-  }
-
   @Override
   public boolean equals(Object o) {
     if (!(o instanceof ColumnFQ))

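The removed static helpers were already superseded by the equivalent instance methods on ColumnFQ (fetch(ScannerBase), put(Mutation, Value), putDelete(Mutation)), as the deprecation notes indicate. A migration sketch; the scanner, mutation, chosen columns, and value are placeholders for whatever the caller actually uses:

import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;

class ColumnFQMigrationSketch {
  static void example(Scanner scanner, Mutation mutation) {
    // Was: ColumnFQ.fetch(scanner, column);
    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);

    // Was: ColumnFQ.put(mutation, column, value);
    TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(mutation, new Value("1".getBytes()));

    // Was: ColumnFQ.putDelete(mutation, column);
    TabletsSection.ServerColumnFamily.LOCK_COLUMN.putDelete(mutation);
  }
}
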
Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/Merge.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/Merge.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/Merge.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/Merge.java Wed Jul  3 18:32:51 2013
@@ -31,6 +31,9 @@ import org.apache.accumulo.core.conf.Pro
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.io.Text;
@@ -216,8 +219,8 @@ public class Merge {
       throw new MergeException(e);
     }
     scanner.setRange(new KeyExtent(new Text(tableId), end, start).toMetadataRange());
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
     final Iterator<Entry<Key,Value>> iterator = scanner.iterator();
     
     Iterator<Size> result = new Iterator<Size>() {
@@ -233,12 +236,12 @@ public class Merge {
         while (iterator.hasNext()) {
           Entry<Key,Value> entry = iterator.next();
           Key key = entry.getKey();
-          if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
             String[] sizeEntries = new String(entry.getValue().get()).split(",");
             if (sizeEntries.length == 2) {
               tabletSize += Long.parseLong(sizeEntries[0]);
             }
-          } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+          } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
             KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue());
             return new Size(extent, tabletSize);
           }

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java Wed Jul  3 18:32:51 2013
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.sh
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java Wed Jul  3 18:32:51 2013
@@ -30,9 +30,10 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.format.BinaryFormatter;
 import org.apache.accumulo.core.util.shell.Shell;
@@ -72,14 +73,14 @@ public class GetSplitsCommand extends Co
       } else {
         String systemTableToCheck = MetadataTable.NAME.equals(tableName) ? RootTable.NAME : MetadataTable.NAME;
         final Scanner scanner = shellState.getConnector().createScanner(systemTableToCheck, Authorizations.EMPTY);
-        MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
         final Text start = new Text(shellState.getConnector().tableOperations().tableIdMap().get(tableName));
         final Text end = new Text(start);
         end.append(new byte[] {'<'}, 0, 1);
         scanner.setRange(new Range(start, end));
         for (Iterator<Entry<Key,Value>> iterator = scanner.iterator(); iterator.hasNext();) {
           final Entry<Key,Value> next = iterator.next();
-          if (MetadataTable.PREV_ROW_COLUMN.hasColumns(next.getKey())) {
+          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(next.getKey())) {
             KeyExtent extent = new KeyExtent(next.getKey().getRow(), next.getValue());
             final String pr = encode(encode, extent.getPrevEndRow());
             final String er = encode(encode, extent.getEndRow());

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java Wed Jul  3 18:32:51 2013
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.sh
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 
 public class OfflineCommand extends TableOperation {

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java Wed Jul  3 18:32:51 2013
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.sh
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 
 public class OnlineCommand extends TableOperation {

Modified: accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java (original)
+++ accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java Wed Jul  3 18:32:51 2013
@@ -47,10 +47,12 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataLocationObtainer;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.hadoop.io.Text;
 
 public class TabletLocatorImplTest extends TestCase {
@@ -454,7 +456,7 @@ public class TabletLocatorImplTest exten
     public Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException {
       return getConnector(auth.user, auth.getPassword());
     }
-
+    
     @Override
     public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
       throw new UnsupportedOperationException();
@@ -474,7 +476,8 @@ public class TabletLocatorImplTest exten
     }
     
     @Override
-    public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials) throws AccumuloSecurityException {
+    public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials)
+        throws AccumuloSecurityException {
       
       // System.out.println("lookupTablet("+src+","+row+","+stopRow+","+ parent+")");
       // System.out.println(tservers);
@@ -503,7 +506,7 @@ public class TabletLocatorImplTest exten
       
       SortedMap<Key,Value> results = tabletData.tailMap(startKey).headMap(stopKey);
       
-      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataTable.getMetadataLocationEntries(results);
+      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results);
       
       for (Entry<KeyExtent,Text> entry : metadata.getFirst().entrySet()) {
         list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
@@ -559,7 +562,7 @@ public class TabletLocatorImplTest exten
       if (failures.size() > 0)
         parent.invalidateCache(failures);
       
-      SortedMap<KeyExtent,Text> metadata = MetadataTable.getMetadataLocationEntries(results).getFirst();
+      SortedMap<KeyExtent,Text> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results).getFirst();
       
       for (Entry<KeyExtent,Text> entry : metadata.entrySet()) {
         list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
@@ -606,18 +609,19 @@ public class TabletLocatorImplTest exten
     if (location != null) {
       if (instance == null)
         instance = "";
-      Key lk = new Key(mr, MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text(instance));
+      Key lk = new Key(mr, TabletsSection.CurrentLocationColumnFamily.NAME, new Text(instance));
       tabletData.put(lk, new Value(location.getBytes()));
     }
     
-    Key pk = new Key(mr, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(), MetadataTable.PREV_ROW_COLUMN.getColumnQualifier());
+    Key pk = new Key(mr, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier());
     tabletData.put(pk, per);
   }
   
   static void setLocation(TServers tservers, String server, KeyExtent tablet, KeyExtent ke, String location) {
     setLocation(tservers, server, tablet, ke, location, "");
   }
-
+  
   static void deleteServer(TServers tservers, String server) {
     tservers.tservers.remove(server);
     
@@ -1274,7 +1278,6 @@ public class TabletLocatorImplTest exten
     } catch (Exception e) {
       
     }
-
-
+    
   }
 }

Copied: accumulo/trunk/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java (from r1497400, accumulo/trunk/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java)
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java?p2=accumulo/trunk/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java&p1=accumulo/trunk/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java&r1=1497400&r2=1499510&rev=1499510&view=diff
==============================================================================
--- accumulo/trunk/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java (original)
+++ accumulo/trunk/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java Wed Jul  3 18:32:51 2013
@@ -14,20 +14,25 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.accumulo.core.util;
+package org.apache.accumulo.core.metadata;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.hadoop.io.Text;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.junit.Test;
 
-public class MetadataTableTest {
+public class MetadataServicerTest {
   
   @Test
   public void checkSystemTableIdentifiers() {
@@ -35,75 +40,47 @@ public class MetadataTableTest {
     assertNotEquals(RootTable.NAME, MetadataTable.NAME);
   }
   
-  private KeyExtent createKeyExtent(String tname, String er, String per) {
-    return new KeyExtent(new Text(tname), er == null ? null : new Text(er), per == null ? null : new Text(per));
-  }
-  
-  private SortedSet<KeyExtent> createKeyExtents(String data[][]) {
-    
-    TreeSet<KeyExtent> extents = new TreeSet<KeyExtent>();
-    for (String[] exdata : data) {
-      extents.add(createKeyExtent(exdata[0], exdata[1], exdata[2]));
-    }
-    
-    return extents;
-  }
-  
-  private void runTest(String beginRange, String endRange) {
-    KeyExtent ke = createKeyExtent("foo", endRange, beginRange);
-    
-    SortedSet<KeyExtent> children = createKeyExtents(new String[][] {new String[] {"foo", endRange, beginRange}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", endRange, "r1"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", endRange, "r2"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", (endRange == null ? "r2" : endRange + "Z"), "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", (beginRange == null ? "r0" : "a" + beginRange)},
-        new String[] {"foo", endRange, "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", endRange, "r2"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", endRange, "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", null}, new String[] {"foo", endRange, "r2"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    if (endRange == null) {
-      children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", null, "r1"},
-          new String[] {"foo", endRange, "r2"}});
-      
-      assertFalse(MetadataTable.isContiguousRange(ke, children));
-    }
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", "r3", "r2"},
-        new String[] {"foo", endRange, "r3"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-  }
-  
   @Test
-  public void testICR1() {
-    runTest(null, null);
-    runTest(null, "r4");
-    runTest("r0", null);
-    runTest("r0", "r4");
+  public void testGetCorrectServicer() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
+    String userTableName = "A";
+    MockInstance instance = new MockInstance("metadataTest");
+    Connector connector = instance.getConnector("root", new PasswordToken(""));
+    connector.tableOperations().create(userTableName);
+    String userTableId = connector.tableOperations().tableIdMap().get(userTableName);
+    TCredentials credentials = CredentialHelper.createSquelchError("root", new PasswordToken(""), instance.getInstanceID());
+    
+    MetadataServicer ms = MetadataServicer.forTableId(instance, credentials, RootTable.ID);
+    assertTrue(ms instanceof ServicerForRootTable);
+    assertFalse(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableId(instance, credentials, MetadataTable.ID);
+    assertTrue(ms instanceof ServicerForMetadataTable);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(MetadataTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableId(instance, credentials, userTableId);
+    assertTrue(ms instanceof ServicerForUserTables);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(MetadataTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(userTableId, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, RootTable.NAME);
+    assertTrue(ms instanceof ServicerForRootTable);
+    assertFalse(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, MetadataTable.NAME);
+    assertTrue(ms instanceof ServicerForMetadataTable);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(MetadataTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, userTableName);
+    assertTrue(ms instanceof ServicerForUserTables);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(MetadataTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(userTableId, ms.getServicedTableId());
   }
 }

Modified: accumulo/trunk/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java (original)
+++ accumulo/trunk/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java Wed Jul  3 18:32:51 2013
@@ -50,7 +50,7 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.user.SummingCombiner;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
@@ -147,7 +147,6 @@ public class SimpleTest {
     props.put("tokenClass", PasswordToken.class.getName());
     
     protocolClass = getRandomProtocol();
-    System.out.println(protocolClass.getName());
     
     proxyPort = PortUtils.getRandomFreePort();
     proxyServer = Proxy.createProxyServer(org.apache.accumulo.proxy.thrift.AccumuloProxy.class, org.apache.accumulo.proxy.ProxyServer.class, proxyPort,
@@ -1001,14 +1000,14 @@ public class SimpleTest {
     client.closeScanner(scanner);
     assertEquals(10, more.getResults().size());
     client.deleteTable(creds, "test2");
-
+    
     // don't know how to test this, call it just for fun
     client.clearLocatorCache(creds, TABLE_TEST);
-
+    
     // compact
     client.compactTable(creds, TABLE_TEST, null, null, null, true, true);
     assertEquals(1, countFiles(TABLE_TEST));
-
+    
     // get disk usage
     client.cloneTable(creds, TABLE_TEST, "test2", true, null, null);
     Set<String> tablesToScan = new HashSet<String>();
@@ -1028,7 +1027,7 @@ public class SimpleTest {
     assertEquals(1, diskUsage.get(2).getTables().size());
     client.deleteTable(creds, "foo");
     client.deleteTable(creds, "test2");
-
+    
     // export/import
     String dir = folder.getRoot() + "/test";
     String destDir = folder.getRoot() + "/test_dest";

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/ServerConstants.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/ServerConstants.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/ServerConstants.java Wed Jul  3 18:32:51 2013
@@ -17,9 +17,9 @@
 package org.apache.accumulo.server;
 
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -28,8 +28,12 @@ public class ServerConstants {
   
   // versions should never be negative
   public static final Integer WIRE_VERSION = 2;
-  public static final int DATA_VERSION = 5;
-  public static final int PREV_DATA_VERSION = 4;
+  
+  /**
+   * current version reflects the addition of a separate root table (ACCUMULO-1481)
+   */
+  public static final int DATA_VERSION = 6;
+  public static final int PREV_DATA_VERSION = 5;
   
   // these are functions to delay loading the Accumulo configuration unless we must
   public static String[] getBaseDirs() {
@@ -92,6 +96,6 @@ public class ServerConstants {
   }
   
   public static String getRootTabletDir() {
-    return prefix(getRootTableDirs(), RootTable.ZROOT_TABLET)[0];
+    return prefix(getRootTableDirs(), RootTable.ROOT_TABLET_LOCATION)[0];
   }
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java Wed Jul  3 18:32:51 2013
@@ -51,11 +51,11 @@ import org.apache.accumulo.core.data.thr
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.FileUtil;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LoggingRunnable;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.StopWatch;
 import org.apache.accumulo.core.util.ThriftUtil;
@@ -132,7 +132,7 @@ public class BulkImporter {
     }
     
     ClientService.Client client = null;
-    final TabletLocator locator = TabletLocator.getInstance(instance, new Text(tableId));
+    final TabletLocator locator = TabletLocator.getLocator(instance, new Text(tableId));
     
     try {
       final Map<Path,List<TabletLocation>> assignments = Collections.synchronizedSortedMap(new TreeMap<Path,List<TabletLocation>>());

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java Wed Jul  3 18:32:51 2013
@@ -32,11 +32,11 @@ import org.apache.accumulo.core.client.s
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.OpTimer;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java Wed Jul  3 18:32:51 2013
@@ -28,9 +28,15 @@ import org.apache.accumulo.core.data.Col
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.accumulo.server.client.HdfsZooInstance;
@@ -55,14 +61,15 @@ public class MetadataConstraints impleme
     }
   }
   
-  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {MetadataTable.PREV_ROW_COLUMN,
-      MetadataTable.OLD_PREV_ROW_COLUMN, MetadataTable.DIRECTORY_COLUMN, MetadataTable.SPLIT_RATIO_COLUMN, MetadataTable.TIME_COLUMN,
-      MetadataTable.LOCK_COLUMN, MetadataTable.FLUSH_COLUMN, MetadataTable.COMPACT_COLUMN}));
-  
-  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {MetadataTable.BULKFILE_COLUMN_FAMILY,
-      MetadataTable.LOG_COLUMN_FAMILY, MetadataTable.SCANFILE_COLUMN_FAMILY, MetadataTable.DATAFILE_COLUMN_FAMILY,
-      MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, MetadataTable.LAST_LOCATION_COLUMN_FAMILY, MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY,
-      MetadataTable.CHOPPED_COLUMN_FAMILY, MetadataTable.CLONED_COLUMN_FAMILY}));
+  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN,
+      TabletsSection.ServerColumnFamily.LOCK_COLUMN, TabletsSection.ServerColumnFamily.FLUSH_COLUMN, TabletsSection.ServerColumnFamily.COMPACT_COLUMN}));
+  
+  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {TabletsSection.BulkFileColumnFamily.NAME,
+      LogColumnFamily.NAME, ScanFileColumnFamily.NAME, DataFileColumnFamily.NAME,
+      TabletsSection.CurrentLocationColumnFamily.NAME, TabletsSection.LastLocationColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME,
+      ChoppedColumnFamily.NAME, ClonedColumnFamily.NAME}));
   
   private static boolean isValidColumn(ColumnUpdate cu) {
     
@@ -78,19 +85,20 @@ public class MetadataConstraints impleme
   static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
     if (lst == null)
       lst = new ArrayList<Short>();
-    lst.add((short)violation);
+    lst.add((short) violation);
     return lst;
   }
   
   static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
     if (lst == null)
       return addViolation(lst, intViolation);
-    short violation = (short)intViolation;
+    short violation = (short) intViolation;
     if (!lst.contains(violation))
       return addViolation(lst, intViolation);
     return lst;
   }
   
+  @Override
   public List<Short> check(Environment env, Mutation mutation) {
     
     ArrayList<Short> violations = null;
@@ -144,7 +152,7 @@ public class MetadataConstraints impleme
     }
     
     boolean checkedBulk = false;
-
+    
     for (ColumnUpdate columnUpdate : colUpdates) {
       Text columnFamily = new Text(columnUpdate.getColumnFamily());
       
@@ -155,11 +163,11 @@ public class MetadataConstraints impleme
         continue;
       }
       
-      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) {
         violations = addViolation(violations, 6);
       }
       
-      if (columnFamily.equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (columnFamily.equals(DataFileColumnFamily.NAME)) {
         try {
           DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
           
@@ -171,33 +179,33 @@ public class MetadataConstraints impleme
         } catch (ArrayIndexOutOfBoundsException aiooe) {
           violations = addViolation(violations, 1);
         }
-      } else if (columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) {
         
-      } else if (columnFamily.equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) {
         if (!columnUpdate.isDeleted() && !checkedBulk) {
           // splits, which also write the time reference, are allowed to write this reference even when
           // the transaction is not running because the other half of the tablet is holding a reference
           // to the file.
           boolean isSplitMutation = false;
-          // When a tablet is assigned, it re-writes the metadata.  It should probably only update the location information, 
-          // but it writes everything.  We allow it to re-write the bulk information if it is setting the location. 
-          // See ACCUMULO-1230. 
+          // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
+          // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
+          // See ACCUMULO-1230.
           boolean isLocationMutation = false;
           
           HashSet<Text> dataFiles = new HashSet<Text>();
           HashSet<Text> loadedFiles = new HashSet<Text>();
-
+          
           String tidString = new String(columnUpdate.getValue());
           int otherTidCount = 0;
-
+          
           for (ColumnUpdate update : mutation.getUpdates()) {
-            if (new ColumnFQ(update).equals(MetadataTable.DIRECTORY_COLUMN)) {
+            if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) {
               isSplitMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
               isLocationMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
               dataFiles.add(new Text(update.getColumnQualifier()));
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.BulkFileColumnFamily.NAME)) {
               loadedFiles.add(new Text(update.getColumnQualifier()));
               
               if (!new String(update.getValue()).equals(tidString)) {
@@ -223,7 +231,7 @@ public class MetadataConstraints impleme
       } else {
         if (!isValidColumn(columnUpdate)) {
           violations = addViolation(violations, 2);
-        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
+        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
             && (violations == null || !violations.contains((short) 4))) {
           KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
           
@@ -234,7 +242,7 @@ public class MetadataConstraints impleme
           if (!prevEndRowLessThanEndRow) {
             violations = addViolation(violations, 3);
           }
-        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.LOCK_COLUMN)) {
+        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) {
           if (zooCache == null) {
             zooCache = new ZooCache();
           }
@@ -263,7 +271,8 @@ public class MetadataConstraints impleme
     if (violations != null) {
       log.debug("violating metadata mutation : " + new String(mutation.getRow()));
       for (ColumnUpdate update : mutation.getUpdates()) {
-        log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
+        log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value "
+            + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
       }
     }
     
@@ -273,7 +282,8 @@ public class MetadataConstraints impleme
   protected Arbitrator getArbitrator() {
     return new ZooArbitrator();
   }
-
+  
+  @Override
   public String getViolationDescription(short violationCode) {
     switch (violationCode) {
       case 1:
@@ -296,6 +306,7 @@ public class MetadataConstraints impleme
     return null;
   }
   
+  @Override
   protected void finalize() {
     if (zooCache != null)
       zooCache.clear();

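For readers tracking the schema refactor above: the constraint now builds its whitelists from the nested MetadataSchema.TabletsSection constants instead of the old MetadataTable fields, and membership is tested by comparing each update's column family as a Text against a HashSet. Below is a minimal, self-contained sketch of that pattern (not part of this commit; the class name, the row value, and the file path in main() are illustrative only):

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.accumulo.core.data.ColumnUpdate;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
    import org.apache.hadoop.io.Text;

    public class ColumnFamilyWhitelistSketch {

      // A few of the families a metadata mutation may legitimately touch,
      // mirroring the validColumnFams set built in the constraint.
      private static final HashSet<Text> ALLOWED = new HashSet<Text>(Arrays.asList(
          DataFileColumnFamily.NAME, ScanFileColumnFamily.NAME,
          TabletsSection.BulkFileColumnFamily.NAME,
          TabletsSection.CurrentLocationColumnFamily.NAME));

      // Returns true only if every update in the mutation uses a whitelisted family.
      static boolean usesOnlyAllowedFamilies(Mutation m) {
        for (ColumnUpdate cu : m.getUpdates()) {
          if (!ALLOWED.contains(new Text(cu.getColumnFamily())))
            return false;
        }
        return true;
      }

      public static void main(String[] args) {
        Mutation m = new Mutation(new Text("4;endrow")); // hypothetical metadata row
        m.put(DataFileColumnFamily.NAME, new Text("/t-0001/F0001.rf"), // hypothetical file entry
            new Value("1000,10".getBytes()));
        System.out.println(usesOnlyAllowedFamilies(m)); // prints true
      }
    }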
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java Wed Jul  3 18:32:51 2013
@@ -42,8 +42,8 @@ import org.apache.accumulo.server.Server
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.AddressUtil;
-import org.apache.accumulo.server.util.MetadataTable;
-import org.apache.accumulo.server.util.MetadataTable.LogEntry;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.trace.instrument.Span;
 import org.apache.accumulo.trace.instrument.Trace;
@@ -223,7 +223,7 @@ public class GarbageCollectWriteAheadLog
   private static int removeMetadataEntries(Map<Path,String> fileToServerMap, Set<Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
       InterruptedException {
     int count = 0;
-    Iterator<LogEntry> iterator = MetadataTable.getLogEntries(SecurityConstants.getSystemCredentials());
+    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(SecurityConstants.getSystemCredentials());
     while (iterator.hasNext()) {
       for (String filename : iterator.next().logSet) {
         Path path;

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java Wed Jul  3 18:32:51 2013
@@ -61,11 +61,16 @@ import org.apache.accumulo.core.gc.thrif
 import org.apache.accumulo.core.gc.thrift.GCStatus;
 import org.apache.accumulo.core.gc.thrift.GcCycleStats;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ServerServices.Service;
@@ -308,6 +313,7 @@ public class SimpleGarbageCollector impl
       try {
         Connector connector = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials));
         connector.tableOperations().compact(MetadataTable.NAME, null, null, true, true);
+        connector.tableOperations().compact(RootTable.NAME, null, null, true, true);
       } catch (Exception e) {
         log.warn(e, e);
       }
@@ -452,19 +458,24 @@ public class SimpleGarbageCollector impl
     }
     
     checkForBulkProcessingFiles = false;
-    Range range = MetadataTable.DELETED_RANGE;
-    candidates.addAll(getBatch(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), range));
+    candidates.addAll(getBatch(RootTable.NAME));
     if (candidateMemExceeded)
       return candidates;
     
-    range = MetadataTable.DELETED_RANGE;
-    candidates.addAll(getBatch(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), range));
+    candidates.addAll(getBatch(MetadataTable.NAME));
     return candidates;
   }
   
-  private Collection<String> getBatch(String prefix, Range range) throws Exception {
+  /**
+   * Gets a batch of delete markers from the specified table.
+   * 
+   * @param tableName
+   *          the name of the system table to scan (either {@link RootTable#NAME} or {@link MetadataTable#NAME})
+   */
+  private Collection<String> getBatch(String tableName) throws Exception {
     // we want to ensure GC makes progress... if the first N deletes are stable and we keep processing them,
     // then we will never inspect deletes after N
+    Range range = MetadataSchema.DeletesSection.getRange();
     if (continueKey != null) {
       if (!range.contains(continueKey)) {
         // continue key is for some other range
@@ -474,13 +485,13 @@ public class SimpleGarbageCollector impl
       continueKey = null;
     }
     
-    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(MetadataTable.NAME,
+    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(tableName,
         Authorizations.EMPTY);
     scanner.setRange(range);
     List<String> result = new ArrayList<String>();
     // find candidates for deletion; chop off the prefix
     for (Entry<Key,Value> entry : scanner) {
-      String cand = entry.getKey().getRow().toString().substring(prefix.length());
+      String cand = entry.getKey().getRow().toString().substring(MetadataSchema.DeletesSection.getRowPrefix().length());
       result.add(cand);
       checkForBulkProcessingFiles |= cand.toLowerCase(Locale.ENGLISH).contains(Constants.BULK_PREFIX);
       if (almostOutOfMemory()) {
@@ -504,7 +515,11 @@ public class SimpleGarbageCollector impl
    * selected 2. They are still in use in the file column family in the METADATA table
    */
   public void confirmDeletes(SortedSet<String> candidates) throws AccumuloException {
-    
+    confirmDeletes(RootTable.NAME, candidates);
+    confirmDeletes(MetadataTable.NAME, candidates);
+  }
+  
+  private void confirmDeletes(String tableName, SortedSet<String> candidates) throws AccumuloException {
     Scanner scanner;
     if (offline) {
       // TODO
@@ -516,8 +531,8 @@ public class SimpleGarbageCollector impl
       // }
     } else {
       try {
-        scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(
-            MetadataTable.NAME, Authorizations.EMPTY));
+        scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(tableName,
+            Authorizations.EMPTY));
       } catch (AccumuloSecurityException ex) {
         throw new AccumuloException(ex);
       } catch (TableNotFoundException ex) {
@@ -530,14 +545,14 @@ public class SimpleGarbageCollector impl
       
       log.debug("Checking for bulk processing flags");
       
-      scanner.setRange(MetadataTable.BLIP_KEYSPACE);
+      scanner.setRange(MetadataSchema.BlipSection.getRange());
       
       // WARNING: This block is IMPORTANT
       // You MUST REMOVE candidates that are in the same folder as a bulk
       // processing flag!
       
       for (Entry<Key,Value> entry : scanner) {
-        String blipPath = entry.getKey().getRow().toString().substring(MetadataTable.BLIP_FLAG_PREFIX.length());
+        String blipPath = entry.getKey().getRow().toString().substring(MetadataSchema.BlipSection.getRowPrefix().length());
         Iterator<String> tailIter = candidates.tailSet(blipPath).iterator();
         int count = 0;
         while (tailIter.hasNext()) {
@@ -558,17 +573,17 @@ public class SimpleGarbageCollector impl
     // skip candidates that are still in use in the file column family in
     // the metadata table
     scanner.clearColumns();
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(MetadataTable.SCANFILE_COLUMN_FAMILY);
-    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
-    TabletIterator tabletIterator = new TabletIterator(scanner, MetadataTable.KEYSPACE, false, true);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    scanner.fetchColumnFamily(ScanFileColumnFamily.NAME);
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+    TabletIterator tabletIterator = new TabletIterator(scanner, MetadataSchema.TabletsSection.getRange(), false, true);
     
     while (tabletIterator.hasNext()) {
       Map<Key,Value> tabletKeyValues = tabletIterator.next();
       
       for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)
-            || entry.getKey().getColumnFamily().equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+        if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)
+            || entry.getKey().getColumnFamily().equals(ScanFileColumnFamily.NAME)) {
           
           String cf = entry.getKey().getColumnQualifier().toString();
           String delete = cf;
@@ -586,16 +601,16 @@ public class SimpleGarbageCollector impl
           // WARNING: This line is EXTREMELY IMPORTANT.
           // You MUST REMOVE candidates that are still in use
           if (candidates.remove(delete))
-            log.debug("Candidate was still in use in the METADATA table: " + delete);
+            log.debug("Candidate was still in use in the " + tableName + " table: " + delete);
           
           String path = delete.substring(0, delete.lastIndexOf('/'));
           if (candidates.remove(path))
-            log.debug("Candidate was still in use in the METADATA table: " + path);
-        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+            log.debug("Candidate was still in use in the " + tableName + " table: " + path);
+        } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
           String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
           String delete = "/" + table + entry.getValue().toString();
           if (candidates.remove(delete))
-            log.debug("Candidate was still in use in the METADATA table: " + delete);
+            log.debug("Candidate was still in use in the " + tableName + " table: " + delete);
         } else
           throw new AccumuloException("Scanner over metadata table returned unexpected column : " + entry.getKey());
       }
@@ -604,16 +619,12 @@ public class SimpleGarbageCollector impl
   
   final static String METADATA_TABLE_DIR = "/" + MetadataTable.ID;
   
-  private static void putMarkerDeleteMutation(final String delete, final BatchWriter writer, final BatchWriter rootWriter) throws MutationsRejectedException {
-    if (delete.contains(METADATA_TABLE_DIR)) {
-      Mutation m = new Mutation(new Text(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString() + delete));
-      m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
-      rootWriter.addMutation(m);
-    } else {
-      Mutation m = new Mutation(new Text(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString() + delete));
-      m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
-      writer.addMutation(m);
-    }
+  private static void putMarkerDeleteMutation(final String delete, final BatchWriter metadataWriter, final BatchWriter rootWriter)
+      throws MutationsRejectedException {
+    BatchWriter writer = delete.contains(METADATA_TABLE_DIR) ? rootWriter : metadataWriter;
+    Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + delete);
+    m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
+    writer.addMutation(m);
   }
   
   /**
@@ -629,9 +640,13 @@ public class SimpleGarbageCollector impl
       try {
         c = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
         writer = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-        rootWriter = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-      } catch (Exception e) {
-        log.error("Unable to create writer to remove file from the " + MetadataTable.NAME + " table", e);
+        rootWriter = c.createBatchWriter(RootTable.NAME, new BatchWriterConfig());
+      } catch (AccumuloException e) {
+        log.error("Unable to connect to Accumulo to write deletes", e);
+      } catch (AccumuloSecurityException e) {
+        log.error("Unable to connect to Accumulo to write deletes", e);
+      } catch (TableNotFoundException e) {
+        log.error("Unable to create writer to remove file from the " + e.getTableName() + " table", e);
       }
     }
     // when deleting a dir and all files in that dir, only need to delete the dir

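The getBatch() and putMarkerDeleteMutation() rewrites above key everything off MetadataSchema.DeletesSection rather than hard-coded ranges and prefixes. As a minimal sketch of the read side (the helper name listDeleteCandidates is illustrative, not from this commit), scanning either RootTable.NAME or MetadataTable.NAME for delete markers looks roughly like:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;

    public class DeleteMarkerScanSketch {

      // Collects the paths recorded as delete markers in the given system table
      // (RootTable.NAME or MetadataTable.NAME), with the deletes-section row prefix stripped.
      static List<String> listDeleteCandidates(Connector conn, String tableName) throws TableNotFoundException {
        Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
        scanner.setRange(MetadataSchema.DeletesSection.getRange());
        String prefix = MetadataSchema.DeletesSection.getRowPrefix();

        List<String> candidates = new ArrayList<String>();
        for (Entry<Key,Value> entry : scanner) {
          candidates.add(entry.getKey().getRow().toString().substring(prefix.length()));
        }
        return candidates;
      }
    }

Unlike the real getBatch(), this sketch ignores the continueKey bookkeeping and the memory guard that let the collector make progress across cycles.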
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java Wed Jul  3 18:32:51 2013
@@ -27,7 +27,7 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 import org.apache.log4j.Logger;
@@ -48,7 +48,7 @@ public class MetadataBulkLoadFilter exte
   
   @Override
   public boolean accept(Key k, Value v) {
-    if (!k.isDeleted() && k.compareColumnFamily(MetadataTable.BULKFILE_COLUMN_FAMILY) == 0) {
+    if (!k.isDeleted() && k.compareColumnFamily(TabletsSection.BulkFileColumnFamily.NAME) == 0) {
       long txid = Long.valueOf(v.toString());
       
       Status status = bulkTxStatusCache.get(txid);
@@ -69,10 +69,10 @@ public class MetadataBulkLoadFilter exte
       
       return status == Status.ACTIVE;
     }
-
+    
     return true;
   }
-
+  
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
@@ -80,7 +80,7 @@ public class MetadataBulkLoadFilter exte
     if (env.getIteratorScope() == IteratorScope.scan) {
       throw new IOException("This iterator not intended for use at scan time");
     }
-
+    
     bulkTxStatusCache = new HashMap<Long,MetadataBulkLoadFilter.Status>();
     arbitrator = getArbitrator();
   }

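MetadataBulkLoadFilter's init() above deliberately rejects the scan scope, so the filter only runs at minor and major compaction time; it is intended for Accumulo's own metadata table configuration rather than user setup. Purely to illustrate the mechanics of attaching such a compaction-only filter through the public API (the priority and iterator name below are arbitrary, and this is not something the commit itself does):

    import java.util.EnumSet;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;

    public class AttachBulkLoadFilterSketch {

      // Attaches the filter for compaction scopes only; the scan scope is omitted
      // because init() throws an IOException when used at scan time.
      static void attach(Connector conn) throws Exception {
        IteratorSetting setting = new IteratorSetting(20, "bulkLoadFilter", MetadataBulkLoadFilter.class);
        conn.tableOperations().attachIterator(MetadataTable.NAME, setting,
            EnumSet.of(IteratorScope.minc, IteratorScope.majc));
      }
    }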
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java Wed Jul  3 18:32:51 2013
@@ -69,12 +69,15 @@ import org.apache.accumulo.core.master.t
 import org.apache.accumulo.core.master.thrift.TabletLoadState;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.master.thrift.TabletSplit;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.AgeOffStore;
@@ -131,7 +134,7 @@ import org.apache.accumulo.server.securi
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.accumulo.server.util.DefaultMap;
 import org.apache.accumulo.server.util.Halt;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.SystemPropUtil;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.util.TablePropUtil;
@@ -288,7 +291,7 @@ public class Master implements LiveTServ
           @Override
           public void run() {
             try {
-              MetadataTable.moveMetaDeleteMarkers(instance, SecurityConstants.getSystemCredentials());
+              MetadataTableUtil.moveMetaDeleteMarkers(instance, SecurityConstants.getSystemCredentials());
               Accumulo.updateAccumuloVersion(fs);
               
               log.info("Upgrade complete");
@@ -379,8 +382,8 @@ public class Master implements LiveTServ
   }
   
   private void checkNotMetadataTable(String tableName, TableOperation operation) throws ThriftTableOperationException {
-    if (tableName.compareTo(MetadataTable.NAME) == 0) {
-      String why = "Table names cannot be == " + MetadataTable.NAME;
+    if (MetadataTable.NAME.equals(tableName) || RootTable.NAME.equals(tableName)) {
+      String why = "Table names cannot be == " + RootTable.NAME + " or " + MetadataTable.NAME;
       log.warn(why);
       throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.OTHER, why);
     }
@@ -527,10 +530,10 @@ public class Master implements LiveTServ
         try {
           Connector conn = getConnector();
           Scanner scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
-          MetadataTable.FLUSH_COLUMN.fetch(scanner);
-          MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
-          scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-          scanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
+          TabletsSection.ServerColumnFamily.FLUSH_COLUMN.fetch(scanner);
+          TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+          scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+          scanner.fetchColumnFamily(LogColumnFamily.NAME);
           scanner.setRange(new KeyExtent(new Text(tableId), null, ByteBufferUtil.toText(startRow)).toMetadataRange());
           
           RowIterator ri = new RowIterator(scanner);
@@ -553,14 +556,14 @@ public class Master implements LiveTServ
               entry = row.next();
               Key key = entry.getKey();
               
-              if (MetadataTable.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
+              if (TabletsSection.ServerColumnFamily.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
                 tabletFlushID = Long.parseLong(entry.getValue().toString());
               }
               
-              if (MetadataTable.LOG_COLUMN_FAMILY.equals(key.getColumnFamily()))
+              if (LogColumnFamily.NAME.equals(key.getColumnFamily()))
                 logs++;
               
-              if (MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily())) {
+              if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily())) {
                 online = true;
                 server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
               }
@@ -899,10 +902,6 @@ public class Master implements LiveTServ
           Text startRow = ByteBufferUtil.toText(arguments.get(1));
           Text endRow = ByteBufferUtil.toText(arguments.get(2));
           final String tableId = checkTableId(tableName, TableOperation.MERGE);
-          if (tableId.equals(RootTable.ID)) {
-            throw new ThriftTableOperationException(null, tableName, TableOperation.MERGE, TableOperationExceptionType.OTHER,
-                "cannot merge or split the root table");
-          }
           log.debug("Creating merge op: " + tableId + " " + startRow + " " + endRow);
           
           if (!security.canMerge(c, tableId))
@@ -1027,8 +1026,6 @@ public class Master implements LiveTServ
   }
   
   public MergeInfo getMergeInfo(KeyExtent tablet) {
-    if (tablet.isRootTablet())
-      return new MergeInfo();
     return getMergeInfo(tablet.getTableId());
   }
   
@@ -1251,7 +1248,7 @@ public class Master implements LiveTServ
     private void cleanupMutations() throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
       Connector connector = getConnector();
       Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
       Set<KeyExtent> found = new HashSet<KeyExtent>();
       for (Entry<Key,Value> entry : scanner) {
         KeyExtent extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());