Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/06/19 00:50:55 UTC

svn commit: r1494358 [2/4] - in /accumulo/trunk: core/src/main/java/org/apache/accumulo/core/ core/src/main/java/org/apache/accumulo/core/client/ core/src/main/java/org/apache/accumulo/core/client/admin/ core/src/main/java/org/apache/accumulo/core/clie...

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java Tue Jun 18 22:50:53 2013
@@ -67,7 +67,9 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ServerServices.Service;
 import org.apache.accumulo.core.util.UtilWaitThread;
@@ -312,7 +314,7 @@ public class SimpleGarbageCollector impl
       // we just made a lot of changes to the !METADATA table: flush them out
       try {
         Connector connector = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials));
-        connector.tableOperations().compact(Constants.METADATA_TABLE_NAME, null, null, true, true);
+        connector.tableOperations().compact(MetadataTable.NAME, null, null, true, true);
       } catch (Exception e) {
         log.warn(e, e);
       }
@@ -454,13 +456,13 @@ public class SimpleGarbageCollector impl
     }
     
     checkForBulkProcessingFiles = false;
-    Range range = Constants.METADATA_DELETES_FOR_METADATA_KEYSPACE;
-    candidates.addAll(getBatch(Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX, range));
+    Range range = RootTable.DELETES_KEYSPACE;
+    candidates.addAll(getBatch(RootTable.DELETE_FLAG_PREFIX, range));
     if (candidateMemExceeded)
       return candidates;
     
-    range = Constants.METADATA_DELETES_KEYSPACE;
-    candidates.addAll(getBatch(Constants.METADATA_DELETE_FLAG_PREFIX, range));
+    range = MetadataTable.DELETES_KEYSPACE;
+    candidates.addAll(getBatch(MetadataTable.DELETE_FLAG_PREFIX, range));
     return candidates;
   }
   
@@ -477,7 +479,7 @@ public class SimpleGarbageCollector impl
     }
     
     Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(
-        Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+        MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(range);
     List<String> result = new ArrayList<String>();
     // find candidates for deletion; chop off the prefix
@@ -517,7 +519,7 @@ public class SimpleGarbageCollector impl
     } else {
       try {
         scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(
-            Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
+            MetadataTable.NAME, Authorizations.EMPTY));
       } catch (AccumuloSecurityException ex) {
         throw new AccumuloException(ex);
       } catch (TableNotFoundException ex) {
@@ -530,14 +532,14 @@ public class SimpleGarbageCollector impl
       
       log.debug("Checking for bulk processing flags");
       
-      scanner.setRange(Constants.METADATA_BLIP_KEYSPACE);
+      scanner.setRange(MetadataTable.BLIP_KEYSPACE);
       
       // WARNING: This block is IMPORTANT
       // You MUST REMOVE candidates that are in the same folder as a bulk
       // processing flag!
       
       for (Entry<Key,Value> entry : scanner) {
-        String blipPath = entry.getKey().getRow().toString().substring(Constants.METADATA_BLIP_FLAG_PREFIX.length());
+        String blipPath = entry.getKey().getRow().toString().substring(MetadataTable.BLIP_FLAG_PREFIX.length());
         Iterator<String> tailIter = candidates.tailSet(blipPath).iterator();
         int count = 0;
         while (tailIter.hasNext()) {
@@ -558,18 +560,18 @@ public class SimpleGarbageCollector impl
     // skip candidates that are still in use in the file column family in
     // the metadata table
     scanner.clearColumns();
-    scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(Constants.METADATA_SCANFILE_COLUMN_FAMILY);
-    Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(MetadataTable.SCANFILE_COLUMN_FAMILY);
+    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
     
-    TabletIterator tabletIterator = new TabletIterator(scanner, Constants.METADATA_KEYSPACE, false, true);
+    TabletIterator tabletIterator = new TabletIterator(scanner, MetadataTable.KEYSPACE, false, true);
     
     while (tabletIterator.hasNext()) {
       Map<Key,Value> tabletKeyValues = tabletIterator.next();
       
       for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)
-            || entry.getKey().getColumnFamily().equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+        if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)
+            || entry.getKey().getColumnFamily().equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
           
           String cf = entry.getKey().getColumnQualifier().toString();
           String delete;
@@ -587,7 +589,7 @@ public class SimpleGarbageCollector impl
           String path = delete.substring(0, delete.lastIndexOf('/'));
           if (candidates.remove(path))
             log.debug("Candidate was still in use in the METADATA table: " + path);
-        } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
           String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
           String delete = "/" + table + entry.getValue().toString();
           if (candidates.remove(delete))
@@ -598,15 +600,15 @@ public class SimpleGarbageCollector impl
     }
   }
   
-  final static String METADATA_TABLE_DIR = "/" + Constants.METADATA_TABLE_ID;
+  final static String METADATA_TABLE_DIR = "/" + MetadataTable.ID;
   
   private static void putMarkerDeleteMutation(final String delete, final BatchWriter writer, final BatchWriter rootWriter) throws MutationsRejectedException {
     if (delete.startsWith(METADATA_TABLE_DIR)) {
-      Mutation m = new Mutation(new Text(Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX + delete));
+      Mutation m = new Mutation(new Text(RootTable.DELETE_FLAG_PREFIX + delete));
       m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
       rootWriter.addMutation(m);
     } else {
-      Mutation m = new Mutation(new Text(Constants.METADATA_DELETE_FLAG_PREFIX + delete));
+      Mutation m = new Mutation(new Text(MetadataTable.DELETE_FLAG_PREFIX + delete));
       m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
       writer.addMutation(m);
     }
@@ -624,10 +626,10 @@ public class SimpleGarbageCollector impl
       Connector c;
       try {
         c = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
-        writer = c.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
-        rootWriter = c.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+        writer = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+        rootWriter = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
       } catch (Exception e) {
-        log.error("Unable to create writer to remove file from the " + Constants.METADATA_TABLE_NAME + " table", e);
+        log.error("Unable to create writer to remove file from the " + MetadataTable.NAME + " table", e);
       }
     }
     // when deleting a dir and all files in that dir, only need to delete the dir
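
The hunks above are representative of the whole patch: each Constants.METADATA_* reference in the garbage collector now resolves through the new MetadataTable class, and references to the root tablet's keyspace go through the new RootTable class. The following is only a rough sketch (the class and method names DeleteCandidateExample and printCandidates are hypothetical, not part of this commit) of how the candidate scan reads after the rename, assuming the relocated constants keep their former values:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.core.util.MetadataTable;

    // Illustrative helper: list delete-flag candidates the way the garbage
    // collector's candidate scan does, spelled with the relocated constants.
    class DeleteCandidateExample {
      static void printCandidates(Connector connector) throws TableNotFoundException {
        Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        scanner.setRange(MetadataTable.DELETES_KEYSPACE);            // was Constants.METADATA_DELETES_KEYSPACE
        for (Entry<Key,Value> entry : scanner) {
          String path = entry.getKey().getRow().toString()
              .substring(MetadataTable.DELETE_FLAG_PREFIX.length()); // was Constants.METADATA_DELETE_FLAG_PREFIX
          System.out.println(path);                                  // candidate file/dir path
        }
      }
    }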

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java Tue Jun 18 22:50:53 2013
@@ -27,6 +27,7 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 import org.apache.log4j.Logger;
@@ -47,7 +48,7 @@ public class MetadataBulkLoadFilter exte
   
   @Override
   public boolean accept(Key k, Value v) {
-    if (!k.isDeleted() && k.compareColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY) == 0) {
+    if (!k.isDeleted() && k.compareColumnFamily(MetadataTable.BULKFILE_COLUMN_FAMILY) == 0) {
       long txid = Long.valueOf(v.toString());
       
       Status status = bulkTxStatusCache.get(txid);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/Master.java Tue Jun 18 22:50:53 2013
@@ -86,6 +86,7 @@ import org.apache.accumulo.core.tabletse
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Daemon;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.AgeOffStore;
@@ -182,7 +183,7 @@ public class Master implements LiveTServ
   final private static Logger log = Logger.getLogger(Master.class);
   
   final private static int ONE_SECOND = 1000;
-  final private static Text METADATA_TABLE_ID = new Text(Constants.METADATA_TABLE_ID);
+  final private static Text METADATA_TABLE_ID = new Text(MetadataTable.ID);
   final private static long TIME_TO_WAIT_BETWEEN_SCANS = 60 * ONE_SECOND;
   final private static long TIME_BETWEEN_MIGRATION_CLEANUPS = 5 * 60 * ONE_SECOND;
   final private static long WAIT_BETWEEN_ERRORS = ONE_SECOND;
@@ -346,7 +347,7 @@ public class Master implements LiveTServ
   }
   
   private int nonMetaDataTabletsAssignedOrHosted() {
-    return totalAssignedOrHosted() - assignedOrHosted(new Text(Constants.METADATA_TABLE_ID));
+    return totalAssignedOrHosted() - assignedOrHosted(new Text(MetadataTable.ID));
   }
   
   private int notHosted() {
@@ -362,7 +363,7 @@ public class Master implements LiveTServ
   // The number of unassigned tablets that should be assigned: displayed on the monitor page
   private int displayUnassigned() {
     int result = 0;
-    Text meta = new Text(Constants.METADATA_TABLE_ID);
+    Text meta = new Text(MetadataTable.ID);
     switch (getMasterState()) {
       case NORMAL:
         // Count offline tablets for online tables
@@ -397,8 +398,8 @@ public class Master implements LiveTServ
   }
   
   private void checkNotMetadataTable(String tableName, TableOperation operation) throws ThriftTableOperationException {
-    if (tableName.compareTo(Constants.METADATA_TABLE_NAME) == 0) {
-      String why = "Table names cannot be == " + Constants.METADATA_TABLE_NAME;
+    if (tableName.compareTo(MetadataTable.NAME) == 0) {
+      String why = "Table names cannot be == " + MetadataTable.NAME;
       log.warn(why);
       throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.OTHER, why);
     }
@@ -544,11 +545,11 @@ public class Master implements LiveTServ
         
         try {
           Connector conn = getConnector();
-          Scanner scanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
-          Constants.METADATA_FLUSH_COLUMN.fetch(scanner);
-          Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-          scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
-          scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
+          Scanner scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+          MetadataTable.FLUSH_COLUMN.fetch(scanner);
+          MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+          scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+          scanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
           scanner.setRange(new KeyExtent(new Text(tableId), null, ByteBufferUtil.toText(startRow)).toMetadataRange());
           
           RowIterator ri = new RowIterator(scanner);
@@ -571,14 +572,14 @@ public class Master implements LiveTServ
               entry = row.next();
               Key key = entry.getKey();
               
-              if (Constants.METADATA_FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
+              if (MetadataTable.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
                 tabletFlushID = Long.parseLong(entry.getValue().toString());
               }
               
-              if (Constants.METADATA_LOG_COLUMN_FAMILY.equals(key.getColumnFamily()))
+              if (MetadataTable.LOG_COLUMN_FAMILY.equals(key.getColumnFamily()))
                 logs++;
               
-              if (Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily())) {
+              if (MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily())) {
                 online = true;
                 server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
               }
@@ -608,9 +609,9 @@ public class Master implements LiveTServ
             throw new ThriftTableOperationException(tableId, null, TableOperation.FLUSH, TableOperationExceptionType.NOTFOUND, null);
           
         } catch (AccumuloException e) {
-          log.debug("Failed to scan " + Constants.METADATA_TABLE_NAME + " table to wait for flush " + tableId, e);
+          log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId, e);
         } catch (TabletDeletedException tde) {
-          log.debug("Failed to scan " + Constants.METADATA_TABLE_NAME + " table to wait for flush " + tableId, tde);
+          log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId, tde);
         } catch (AccumuloSecurityException e) {
           log.warn(e.getMessage(), e);
           throw new ThriftSecurityException();
@@ -917,7 +918,7 @@ public class Master implements LiveTServ
           Text startRow = ByteBufferUtil.toText(arguments.get(1));
           Text endRow = ByteBufferUtil.toText(arguments.get(2));
           final String tableId = checkTableId(tableName, TableOperation.MERGE);
-          if (tableName.equals(Constants.METADATA_TABLE_NAME)) {
+          if (tableName.equals(MetadataTable.NAME)) {
             if (startRow.compareTo(new Text("0")) < 0) {
               startRow = new Text("0");
               if (endRow.getLength() != 0 && endRow.compareTo(startRow) < 0)
@@ -1568,26 +1569,26 @@ public class Master implements LiveTServ
         log.debug("Making file deletion entries for " + range);
         Range deleteRange = new Range(KeyExtent.getMetadataEntry(range.getTableId(), start), false, KeyExtent.getMetadataEntry(range.getTableId(),
             range.getEndRow()), true);
-        Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         scanner.setRange(deleteRange);
-        Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-        Constants.METADATA_TIME_COLUMN.fetch(scanner);
-        scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-        scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
+        MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+        MetadataTable.TIME_COLUMN.fetch(scanner);
+        scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+        scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
         Set<String> datafiles = new TreeSet<String>();
         for (Entry<Key,Value> entry : scanner) {
           Key key = entry.getKey();
-          if (key.compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
+          if (key.compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
             datafiles.add(key.getColumnQualifier().toString());
             if (datafiles.size() > 1000) {
               MetadataTable.addDeleteEntries(range, datafiles, SecurityConstants.getSystemCredentials());
               datafiles.clear();
             }
-          } else if (Constants.METADATA_TIME_COLUMN.hasColumns(key)) {
+          } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
             timeType = entry.getValue().toString().charAt(0);
-          } else if (key.compareColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+          } else if (key.compareColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
             throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
-          } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
+          } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
             datafiles.add(entry.getValue().toString());
             if (datafiles.size() > 1000) {
               MetadataTable.addDeleteEntries(range, datafiles, SecurityConstants.getSystemCredentials());
@@ -1596,7 +1597,7 @@ public class Master implements LiveTServ
           }
         }
         MetadataTable.addDeleteEntries(range, datafiles, SecurityConstants.getSystemCredentials());
-        BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
         try {
           deleteTablets(deleteRange, bw, conn);
         } finally {
@@ -1605,11 +1606,11 @@ public class Master implements LiveTServ
         
         if (followingTablet != null) {
           log.debug("Updating prevRow of " + followingTablet + " to " + range.getPrevEndRow());
-          bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+          bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
           try {
             Mutation m = new Mutation(followingTablet.getMetadataEntry());
-            Constants.METADATA_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(range.getPrevEndRow()));
-            Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
+            MetadataTable.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(range.getPrevEndRow()));
+            MetadataTable.CHOPPED_COLUMN.putDelete(m);
             bw.addMutation(m);
             bw.flush();
           } finally {
@@ -1639,34 +1640,34 @@ public class Master implements LiveTServ
       }
       Range scanRange = new Range(KeyExtent.getMetadataEntry(range.getTableId(), start), false, stopRow, false);
       if (range.isMeta())
-        scanRange = scanRange.clip(Constants.METADATA_ROOT_TABLET_KEYSPACE);
+        scanRange = scanRange.clip(RootTable.KEYSPACE);
       
       BatchWriter bw = null;
       try {
         long fileCount = 0;
         Connector conn = getConnector();
         // Make file entries in highest tablet
-        bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
-        Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+        bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         scanner.setRange(scanRange);
-        Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
-        Constants.METADATA_TIME_COLUMN.fetch(scanner);
-        Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-        scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+        MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+        MetadataTable.TIME_COLUMN.fetch(scanner);
+        MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+        scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
         Mutation m = new Mutation(stopRow);
         String maxLogicalTime = null;
         for (Entry<Key,Value> entry : scanner) {
           Key key = entry.getKey();
           Value value = entry.getValue();
-          if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
             m.put(key.getColumnFamily(), key.getColumnQualifier(), value);
             fileCount++;
-          } else if (Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
+          } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
             log.debug("prevRow entry for lowest tablet is " + value);
             firstPrevRowValue = new Value(value);
-          } else if (Constants.METADATA_TIME_COLUMN.hasColumns(key)) {
+          } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
             maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString());
-          } else if (Constants.METADATA_DIRECTORY_COLUMN.hasColumns(key)) {
+          } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
             if (!range.isMeta())
               bw.addMutation(MetadataTable.createDeleteMutation(range.getTableId().toString(), entry.getValue().toString()));
           }
@@ -1674,20 +1675,20 @@ public class Master implements LiveTServ
         
         // read the logical time from the last tablet in the merge range, it is not included in
         // the loop above
-        scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+        scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         Range last = new Range(stopRow);
         if (range.isMeta())
-          last = last.clip(Constants.METADATA_ROOT_TABLET_KEYSPACE);
+          last = last.clip(RootTable.KEYSPACE);
         scanner.setRange(last);
-        Constants.METADATA_TIME_COLUMN.fetch(scanner);
+        MetadataTable.TIME_COLUMN.fetch(scanner);
         for (Entry<Key,Value> entry : scanner) {
-          if (Constants.METADATA_TIME_COLUMN.hasColumns(entry.getKey())) {
+          if (MetadataTable.TIME_COLUMN.hasColumns(entry.getKey())) {
             maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, entry.getValue().toString());
           }
         }
         
         if (maxLogicalTime != null)
-          Constants.METADATA_TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
+          MetadataTable.TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
         
         if (!m.getUpdates().isEmpty()) {
           bw.addMutation(m);
@@ -1712,7 +1713,7 @@ public class Master implements LiveTServ
         
         // Clean-up the last chopped marker
         m = new Mutation(stopRow);
-        Constants.METADATA_CHOPPED_COLUMN.putDelete(m);
+        MetadataTable.CHOPPED_COLUMN.putDelete(m);
         bw.addMutation(m);
         bw.flush();
         
@@ -1735,7 +1736,7 @@ public class Master implements LiveTServ
       // group all deletes into tablet into one mutation, this makes tablets
       // either disappear entirely or not all.. this is important for the case
       // where the process terminates in the loop below...
-      scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+      scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
       log.debug("Deleting range " + scanRange);
       scanner.setRange(scanRange);
       RowIterator rowIter = new RowIterator(scanner);
@@ -1761,8 +1762,8 @@ public class Master implements LiveTServ
     private KeyExtent getHighTablet(KeyExtent range) throws AccumuloException {
       try {
         Connector conn = getConnector();
-        Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
-        Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
+        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+        MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
         KeyExtent start = new KeyExtent(range.getTableId(), range.getEndRow(), null);
         scanner.setRange(new Range(start.getMetadataEntry(), null));
         Iterator<Entry<Key,Value>> iterator = scanner.iterator();
@@ -1846,8 +1847,8 @@ public class Master implements LiveTServ
     // remove any migrating tablets that no longer exist.
     private void cleanupMutations() throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
       Connector connector = getConnector();
-      Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
-      Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
+      Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
       Set<KeyExtent> found = new HashSet<KeyExtent>();
       for (Entry<Key,Value> entry : scanner) {
         KeyExtent extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
@@ -2323,7 +2324,7 @@ public class Master implements LiveTServ
     Set<String> result = new HashSet<String>();
     if (getMasterState() != MasterState.NORMAL) {
       if (getMasterState() != MasterState.UNLOAD_METADATA_TABLETS)
-        result.add(Constants.METADATA_TABLE_ID);
+        result.add(MetadataTable.ID);
       return result;
     }
     TableManager manager = TableManager.getInstance();

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java Tue Jun 18 22:50:53 2013
@@ -25,12 +25,12 @@ import java.util.Random;
 import java.util.Set;
 import java.util.SortedMap;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
@@ -103,7 +103,7 @@ public class ChaoticLoadBalancer extends
     
     for (Entry<TServerInstance,TabletServerStatus> e : current.entrySet()) {
       for (String table : e.getValue().getTableMap().keySet()) {
-        if (!moveMetadata && Constants.METADATA_TABLE_NAME.equals(table))
+        if (!moveMetadata && MetadataTable.NAME.equals(table))
           continue;
         try {
           for (TabletStats ts : getOnlineTabletsForTable(e.getKey(), table)) {

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java Tue Jun 18 22:50:53 2013
@@ -30,6 +30,8 @@ import org.apache.accumulo.core.data.Par
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
@@ -171,7 +173,7 @@ public class MergeStats {
   
   private boolean verifyMergeConsistency(Connector connector, CurrentState master) throws TableNotFoundException, IOException {
     MergeStats verify = new MergeStats(info);
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     MetaDataTableScanner.configureScanner(scanner, master);
     KeyExtent extent = info.getRange();
     Text start = extent.getPrevEndRow();
@@ -183,7 +185,7 @@ public class MergeStats {
     Range range = new Range(first, false, null, true);
     if (extent.isMeta()) {
       // don't go off the root tablet
-      range = new Range(new Key(first).followingKey(PartialKey.ROW), false, Constants.METADATA_ROOT_TABLET_KEYSPACE.getEndKey(), false);
+      range = new Range(new Key(first).followingKey(PartialKey.ROW), false, RootTable.KEYSPACE.getEndKey(), false);
     }
     scanner.setRange(range);
     KeyExtent prevExtent = null;
@@ -220,7 +222,7 @@ public class MergeStats {
         }
         
       } else if (!tls.extent.isPreviousExtent(prevExtent)) {
-        log.debug("hole in " + Constants.METADATA_TABLE_NAME);
+        log.debug("hole in " + MetadataTable.NAME);
         return false;
       }
       

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java Tue Jun 18 22:50:53 2013
@@ -20,7 +20,6 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Instance;
@@ -29,6 +28,7 @@ import org.apache.accumulo.core.client.T
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.hadoop.io.Text;
@@ -56,7 +56,7 @@ public class MetaDataStateStore extends 
 
   @Override
   public Iterator<TabletLocationState> iterator() {
-    return new MetaDataTableScanner(instance, auths, Constants.NON_ROOT_METADATA_KEYSPACE, state);
+    return new MetaDataTableScanner(instance, auths, MetadataTable.NON_ROOT_KEYSPACE, state);
   }
   
   @Override
@@ -66,8 +66,8 @@ public class MetaDataStateStore extends 
       for (Assignment assignment : assignments) {
         Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
         Text cq = assignment.server.asColumnQualifier();
-        m.put(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, cq, assignment.server.asMutationValue());
-        m.putDelete(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, cq);
+        m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, cq, assignment.server.asMutationValue());
+        m.putDelete(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, cq);
         writer.addMutation(m);
       }
     } catch (Exception ex) {
@@ -83,7 +83,7 @@ public class MetaDataStateStore extends 
   
   BatchWriter createBatchWriter() {
     try {
-      return instance.getConnector(auths.getPrincipal(), CredentialHelper.extractToken(auths)).createBatchWriter(Constants.METADATA_TABLE_NAME,
+      return instance.getConnector(auths.getPrincipal(), CredentialHelper.extractToken(auths)).createBatchWriter(MetadataTable.NAME,
           new BatchWriterConfig().setMaxMemory(MAX_MEMORY).setMaxLatency(LATENCY, TimeUnit.MILLISECONDS).setMaxWriteThreads(THREADS));
     } catch (TableNotFoundException e) {
       // ya, I don't think so
@@ -99,7 +99,7 @@ public class MetaDataStateStore extends 
     try {
       for (Assignment assignment : assignments) {
         Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
-        m.put(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
+        m.put(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
         writer.addMutation(m);
       }
     } catch (Exception ex) {
@@ -121,10 +121,10 @@ public class MetaDataStateStore extends 
       for (TabletLocationState tls : tablets) {
         Mutation m = new Mutation(tls.extent.getMetadataEntry());
         if (tls.current != null) {
-          m.putDelete(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, tls.current.asColumnQualifier());
+          m.putDelete(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, tls.current.asColumnQualifier());
         }
         if (tls.future != null) {
-          m.putDelete(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, tls.future.asColumnQualifier());
+          m.putDelete(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, tls.future.asColumnQualifier());
         }
         writer.addMutation(m);
       }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java Tue Jun 18 22:50:53 2013
@@ -26,7 +26,6 @@ import java.util.List;
 import java.util.Map.Entry;
 import java.util.SortedMap;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
@@ -40,6 +39,7 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -54,7 +54,7 @@ public class MetaDataTableScanner implem
     // scan over metadata table, looking for tablets in the wrong state based on the live servers and online tables
     try {
       Connector connector = instance.getConnector(auths.getPrincipal(), CredentialHelper.extractToken(auths));
-      mdScanner = connector.createBatchScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY, 8);
+      mdScanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
       configureScanner(mdScanner, state);
       mdScanner.setRanges(Collections.singletonList(range));
       iter = mdScanner.iterator();
@@ -65,11 +65,11 @@ public class MetaDataTableScanner implem
   }
   
   static public void configureScanner(ScannerBase scanner, CurrentState state) {
-    Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(Constants.METADATA_CHOPPED_COLUMN_FAMILY);
+    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(MetadataTable.CHOPPED_COLUMN_FAMILY);
     scanner.addScanIterator(new IteratorSetting(1000, "wholeRows", WholeRowIterator.class));
     IteratorSetting tabletChange = new IteratorSetting(1001, "tabletChange", TabletStateChangeIterator.class);
     if (state != null) {
@@ -134,30 +134,30 @@ public class MetaDataTableScanner implem
       Text cf = key.getColumnFamily();
       Text cq = key.getColumnQualifier();
       
-      if (cf.compareTo(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY) == 0) {
+      if (cf.compareTo(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY) == 0) {
         TServerInstance location = new TServerInstance(entry.getValue(), cq);
         if (future != null) {
           throw new BadLocationStateException("found two assignments for the same extent " + key.getRow() + ": " + future + " and " + location);
         }
         future = location;
-      } else if (cf.compareTo(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
         TServerInstance location = new TServerInstance(entry.getValue(), cq);
         if (current != null) {
           throw new BadLocationStateException("found two locations for the same extent " + key.getRow() + ": " + current + " and " + location);
         }
         current = location;
-      } else if (cf.compareTo(Constants.METADATA_LOG_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(MetadataTable.LOG_COLUMN_FAMILY) == 0) {
         String[] split = entry.getValue().toString().split("\\|")[0].split(";");
         walogs.add(Arrays.asList(split));
-      } else if (cf.compareTo(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(MetadataTable.LAST_LOCATION_COLUMN_FAMILY) == 0) {
         TServerInstance location = new TServerInstance(entry.getValue(), cq);
         if (last != null) {
           throw new BadLocationStateException("found two last locations for the same extent " + key.getRow() + ": " + last + " and " + location);
         }
         last = new TServerInstance(entry.getValue(), cq);
-      } else if (cf.compareTo(Constants.METADATA_CHOPPED_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(MetadataTable.CHOPPED_COLUMN_FAMILY) == 0) {
         chopped = true;
-      } else if (Constants.METADATA_PREV_ROW_COLUMN.equals(cf, cq)) {
+      } else if (MetadataTable.PREV_ROW_COLUMN.equals(cf, cq)) {
         extent = new KeyExtent(row, entry.getValue());
       }
     }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java Tue Jun 18 22:50:53 2013
@@ -18,9 +18,9 @@ package org.apache.accumulo.server.maste
 
 import java.util.Iterator;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.RootTable;
 
 public class RootTabletStateStore extends MetaDataStateStore {
   
@@ -30,7 +30,7 @@ public class RootTabletStateStore extend
   
   @Override
   public Iterator<TabletLocationState> iterator() {
-    return new MetaDataTableScanner(instance, auths, Constants.METADATA_ROOT_TABLET_KEYSPACE, state);
+    return new MetaDataTableScanner(instance, auths, RootTable.KEYSPACE, state);
   }
   
   @Override

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java Tue Jun 18 22:50:53 2013
@@ -19,10 +19,10 @@ package org.apache.accumulo.server.maste
 import java.io.Serializable;
 import java.net.InetSocketAddress;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.hadoop.io.Text;
 
@@ -58,19 +58,19 @@ public class TServerInstance implements 
   }
   
   public void putLocation(Mutation m) {
-    m.put(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
+    m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
   }
   
   public void putFutureLocation(Mutation m) {
-    m.put(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
+    m.put(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
   }
   
   public void putLastLocation(Mutation m) {
-    m.put(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
+    m.put(MetadataTable.LAST_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
   }
   
   public void clearLastLocation(Mutation m) {
-    m.putDelete(Constants.METADATA_LAST_LOCATION_COLUMN_FAMILY, asColumnQualifier());
+    m.putDelete(MetadataTable.LAST_LOCATION_COLUMN_FAMILY, asColumnQualifier());
   }
   
   @Override

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java Tue Jun 18 22:50:53 2013
@@ -23,8 +23,8 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.util.AddressUtil;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.util.MetadataTable;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.log4j.Logger;
@@ -60,9 +60,9 @@ public class ZooTabletStateStore extends
       public TabletLocationState next() {
         finished = true;
         try {
-          byte[] future = store.get(Constants.ZROOT_TABLET_FUTURE_LOCATION);
-          byte[] current = store.get(Constants.ZROOT_TABLET_LOCATION);
-          byte[] last = store.get(Constants.ZROOT_TABLET_LAST_LOCATION);
+          byte[] future = store.get(RootTable.ZROOT_TABLET_FUTURE_LOCATION);
+          byte[] current = store.get(RootTable.ZROOT_TABLET_LOCATION);
+          byte[] last = store.get(RootTable.ZROOT_TABLET_LAST_LOCATION);
           
           TServerInstance currentSession = null;
           TServerInstance futureSession = null;
@@ -79,8 +79,8 @@ public class ZooTabletStateStore extends
             futureSession = null;
           }
           List<Collection<String>> logs = new ArrayList<Collection<String>>();
-          for (String entry : store.getChildren(Constants.ZROOT_TABLET_WALOGS)) {
-            byte[] logInfo = store.get(Constants.ZROOT_TABLET_WALOGS + "/" + entry);
+          for (String entry : store.getChildren(RootTable.ZROOT_TABLET_WALOGS)) {
+            byte[] logInfo = store.get(RootTable.ZROOT_TABLET_WALOGS + "/" + entry);
             if (logInfo != null) {
               MetadataTable.LogEntry logEntry = new MetadataTable.LogEntry();
               logEntry.fromBytes(logInfo);
@@ -88,7 +88,7 @@ public class ZooTabletStateStore extends
               log.debug("root tablet logSet " + logEntry.logSet);
             }
           }
-          TabletLocationState result = new TabletLocationState(Constants.ROOT_TABLET_EXTENT, futureSession, currentSession, lastSession, logs, false);
+          TabletLocationState result = new TabletLocationState(RootTable.ROOT_TABLET_EXTENT, futureSession, currentSession, lastSession, logs, false);
           log.debug("Returning root tablet state: " + result);
           return result;
         } catch (Exception ex) {
@@ -120,7 +120,7 @@ public class ZooTabletStateStore extends
     if (assignments.size() != 1)
       throw new IllegalArgumentException("There is only one root tablet");
     Assignment assignment = assignments.iterator().next();
-    if (assignment.tablet.compareTo(Constants.ROOT_TABLET_EXTENT) != 0)
+    if (assignment.tablet.compareTo(RootTable.ROOT_TABLET_EXTENT) != 0)
       throw new IllegalArgumentException("You can only store the root tablet location");
     String value = AddressUtil.toString(assignment.server.getLocation()) + "|" + assignment.server.getSession();
     Iterator<TabletLocationState> currentIter = iterator();
@@ -128,7 +128,7 @@ public class ZooTabletStateStore extends
     if (current.current != null) {
       throw new IllegalDSException("Trying to set the root tablet location: it is already set to " + current.current);
     }
-    store.put(Constants.ZROOT_TABLET_FUTURE_LOCATION, value.getBytes());
+    store.put(RootTable.ZROOT_TABLET_FUTURE_LOCATION, value.getBytes());
   }
   
   @Override
@@ -136,7 +136,7 @@ public class ZooTabletStateStore extends
     if (assignments.size() != 1)
       throw new IllegalArgumentException("There is only one root tablet");
     Assignment assignment = assignments.iterator().next();
-    if (assignment.tablet.compareTo(Constants.ROOT_TABLET_EXTENT) != 0)
+    if (assignment.tablet.compareTo(RootTable.ROOT_TABLET_EXTENT) != 0)
       throw new IllegalArgumentException("You can only store the root tablet location");
     String value = AddressUtil.toString(assignment.server.getLocation()) + "|" + assignment.server.getSession();
     Iterator<TabletLocationState> currentIter = iterator();
@@ -147,10 +147,10 @@ public class ZooTabletStateStore extends
     if (!current.future.equals(assignment.server)) {
       throw new IllegalDSException("Root tablet is already assigned to " + current.future);
     }
-    store.put(Constants.ZROOT_TABLET_LOCATION, value.getBytes());
-    store.put(Constants.ZROOT_TABLET_LAST_LOCATION, value.getBytes());
+    store.put(RootTable.ZROOT_TABLET_LOCATION, value.getBytes());
+    store.put(RootTable.ZROOT_TABLET_LAST_LOCATION, value.getBytes());
     // Make the following unnecessary by making the entire update atomic 
-    store.remove(Constants.ZROOT_TABLET_FUTURE_LOCATION);
+    store.remove(RootTable.ZROOT_TABLET_FUTURE_LOCATION);
     log.debug("Put down root tablet location");
   }
   
@@ -159,10 +159,10 @@ public class ZooTabletStateStore extends
     if (tablets.size() != 1)
       throw new IllegalArgumentException("There is only one root tablet");
     TabletLocationState tls = tablets.iterator().next();
-    if (tls.extent.compareTo(Constants.ROOT_TABLET_EXTENT) != 0)
+    if (tls.extent.compareTo(RootTable.ROOT_TABLET_EXTENT) != 0)
       throw new IllegalArgumentException("You can only store the root tablet location");
-    store.remove(Constants.ZROOT_TABLET_LOCATION);
-    store.remove(Constants.ZROOT_TABLET_FUTURE_LOCATION);
+    store.remove(RootTable.ZROOT_TABLET_LOCATION);
+    store.remove(RootTable.ZROOT_TABLET_FUTURE_LOCATION);
     log.debug("unassign root tablet location");
   }
   
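ZooTabletStateStore gets the same treatment for the root tablet, whose location is kept in ZooKeeper rather than in the metadata table: Constants.ZROOT_TABLET_* paths become RootTable.ZROOT_TABLET_*, and Constants.ROOT_TABLET_EXTENT becomes RootTable.ROOT_TABLET_EXTENT. A minimal sketch of the lookup shape follows, assuming the relocated constants keep their former values; KeyValueStore and readCurrentLocation are hypothetical stand-ins (not Accumulo API) for the ZooKeeper-backed store this class wraps:

    import org.apache.accumulo.core.util.RootTable;

    // Illustrative only: fetch the root tablet's location entries through the
    // constants introduced by this commit. The stored value is
    // address + "|" + session, as ZooTabletStateStore writes it.
    class RootLocationExample {
      interface KeyValueStore {
        byte[] get(String path) throws Exception;
      }

      static String readCurrentLocation(KeyValueStore store) throws Exception {
        byte[] current = store.get(RootTable.ZROOT_TABLET_LOCATION);        // was Constants.ZROOT_TABLET_LOCATION
        if (current != null)
          return new String(current);
        byte[] future = store.get(RootTable.ZROOT_TABLET_FUTURE_LOCATION);  // was Constants.ZROOT_TABLET_FUTURE_LOCATION
        return future == null ? null : new String(future);
      }
    }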

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java Tue Jun 18 22:50:53 2013
@@ -397,9 +397,9 @@ class CopyFailed extends MasterRepo {
     
     // determine which failed files were loaded
     Connector conn = master.getConnector();
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
+    mscanner.fetchColumnFamily(MetadataTable.BULKFILE_COLUMN_FAMILY);
     
     for (Entry<Key,Value> entry : mscanner) {
       if (Long.parseLong(entry.getValue().toString()) == tid) {

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java Tue Jun 18 22:50:53 2013
@@ -42,6 +42,8 @@ import org.apache.accumulo.core.data.Ran
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
@@ -91,17 +93,17 @@ class CompactionDriver extends MasterRep
     
     MapCounter<TServerInstance> serversToFlush = new MapCounter<TServerInstance>();
     Connector conn = master.getConnector();
-    Scanner scanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
+    Scanner scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
     
     Range range = new KeyExtent(new Text(tableId), null, startRow == null ? null : new Text(startRow)).toMetadataRange();
     
-    if (tableId.equals(Constants.METADATA_TABLE_ID))
-      range = range.clip(new Range(Constants.ROOT_TABLET_EXTENT.getMetadataEntry(), false, null, true));
+    if (tableId.equals(MetadataTable.ID))
+      range = range.clip(new Range(RootTable.ROOT_TABLET_EXTENT.getMetadataEntry(), false, null, true));
     
     scanner.setRange(range);
-    Constants.METADATA_COMPACT_COLUMN.fetch(scanner);
-    Constants.METADATA_DIRECTORY_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
+    MetadataTable.COMPACT_COLUMN.fetch(scanner);
+    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
     
     long t1 = System.currentTimeMillis();
     RowIterator ri = new RowIterator(scanner);
@@ -120,10 +122,10 @@ class CompactionDriver extends MasterRep
         entry = row.next();
         Key key = entry.getKey();
         
-        if (Constants.METADATA_COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+        if (MetadataTable.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
           tabletCompactID = Long.parseLong(entry.getValue().toString());
         
-        if (Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily()))
+        if (MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily()))
           server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
       }
       

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java Tue Jun 18 22:50:53 2013
@@ -20,7 +20,6 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
@@ -89,7 +88,7 @@ class CleanUp extends MasterRepo {
     
     boolean done = true;
     Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    Scanner scanner = master.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+    Scanner scanner = master.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     MetaDataTableScanner.configureScanner(scanner, master);
     scanner.setRange(tableRange);
     
@@ -127,10 +126,10 @@ class CleanUp extends MasterRepo {
     try {
       // look for other tables that reference this table's files
       Connector conn = master.getConnector();
-      BatchScanner bs = conn.createBatchScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY, 8);
+      BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
       try {
-        bs.setRanges(Collections.singleton(Constants.NON_ROOT_METADATA_KEYSPACE));
-        bs.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
+        bs.setRanges(Collections.singleton(MetadataTable.NON_ROOT_KEYSPACE));
+        bs.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
         IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
         GrepIterator.setTerm(cfg, "../" + tableId + "/");
         bs.addScanIterator(cfg);
@@ -146,7 +145,7 @@ class CleanUp extends MasterRepo {
       
     } catch (Exception e) {
       refCount = -1;
-      log.error("Failed to scan " + Constants.METADATA_TABLE_NAME + " looking for references to deleted table " + tableId, e);
+      log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
     }
     
     // remove metadata table entries

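As context for the CleanUp hunks above: before the deleted table's files are removed, the non-root metadata keyspace is grepped for relative ../<tableId>/ paths to make sure no other table still references them. A rough, uncompiled sketch of that check (conn and tableId are placeholders, log is the patched class's logger):

    int refCount = 0;
    try {
      BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
      try {
        bs.setRanges(Collections.singleton(MetadataTable.NON_ROOT_KEYSPACE));
        bs.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
        // entries that reference files living under another table's directory use relative ../<tableId>/ paths
        IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
        GrepIterator.setTerm(cfg, "../" + tableId + "/");
        bs.addScanIterator(cfg);
        for (Entry<Key,Value> entry : bs)
          refCount++;
      } finally {
        bs.close();
      }
    } catch (Exception e) {
      refCount = -1;  // unknown; err on the side of keeping the files
      log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
    }
    // only when refCount == 0 is it safe to remove the table's directory from HDFS
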
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java Tue Jun 18 22:50:53 2013
@@ -47,6 +47,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.conf.ServerConfiguration;
@@ -96,12 +97,12 @@ class WriteExportFiles extends MasterRep
     
     checkOffline(conn);
     
-    Scanner metaScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
     
     // scan for locations
-    metaScanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
-    metaScanner.fetchColumnFamily(Constants.METADATA_FUTURE_LOCATION_COLUMN_FAMILY);
+    metaScanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    metaScanner.fetchColumnFamily(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
     
     if (metaScanner.iterator().hasNext()) {
       return 500;
@@ -110,7 +111,7 @@ class WriteExportFiles extends MasterRep
     // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
     // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
     metaScanner.clearColumns();
-    metaScanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
+    metaScanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
     
     if (metaScanner.iterator().hasNext()) {
       throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
@@ -218,17 +219,17 @@ class WriteExportFiles extends MasterRep
     
     Map<String,String> uniqueFiles = new HashMap<String,String>();
     
-    Scanner metaScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
-    metaScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
-    Constants.METADATA_PREV_ROW_COLUMN.fetch(metaScanner);
-    Constants.METADATA_TIME_COLUMN.fetch(metaScanner);
+    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    metaScanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    MetadataTable.PREV_ROW_COLUMN.fetch(metaScanner);
+    MetadataTable.TIME_COLUMN.fetch(metaScanner);
     metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
     
     for (Entry<Key,Value> entry : metaScanner) {
       entry.getKey().write(dataOut);
       entry.getValue().write(dataOut);
       
-      if (entry.getKey().getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
         String relPath = entry.getKey().getColumnQualifierData().toString();
         
         if (relPath.startsWith("../"))

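The two checks patched above form a single precondition for export: an offline table must have no current or future tablet locations and no write-ahead logs. Sketched roughly, uncompiled, with conn and tableId as placeholders:

    Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    metaScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());

    // any current or future location means a tablet is (about to be) hosted
    metaScanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
    metaScanner.fetchColumnFamily(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
    boolean hasLocations = metaScanner.iterator().hasNext();

    // walogs are checked in a second pass because they are only acceptable when there
    // is no location; a remaining walog means data that never made it into files
    metaScanner.clearColumns();
    metaScanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
    boolean hasWalogs = metaScanner.iterator().hasNext();

    boolean safeToExport = !hasLocations && !hasWalogs;
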
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java Tue Jun 18 22:50:53 2013
@@ -205,7 +205,7 @@ class PopulateMetadataTable extends Mast
     try {
       FileSystem fs = master.getFileSystem();
       
-      mbw = master.getConnector().createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+      mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
       
       zis = new ZipInputStream(fs.open(path));
       
@@ -234,7 +234,7 @@ class PopulateMetadataTable extends Mast
             
             Text cq;
             
-            if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
+            if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
               String oldName = new Path(key.getColumnQualifier().toString()).getName();
               String newName = fileNameMappings.get(oldName);
               
@@ -245,19 +245,19 @@ class PopulateMetadataTable extends Mast
             
             if (m == null) {
               m = new Mutation(metadataRow);
-              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+              MetadataTable.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
               currentRow = metadataRow;
             }
             
             if (!currentRow.equals(metadataRow)) {
               mbw.addMutation(m);
               m = new Mutation(metadataRow);
-              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+              MetadataTable.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
             }
             
             m.put(key.getColumnFamily(), cq, val);
             
-            if (endRow == null && Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key)) {
+            if (endRow == null && MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
               mbw.addMutation(m);
               break; // it's the last column in the last row
             }

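The PopulateMetadataTable hunks above are easier to follow when the per-entry flow is assembled outside the diff. A rough, uncompiled sketch of what happens for each key/value read back from the exported metadata file (m, currentRow, metadataRow, mbw, dirCount, fileNameMappings, endRow, and val all come from the surrounding method):

    Text cq = key.getColumnQualifier();
    if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
      // data file entries are rewritten to the names the files received on import
      String oldName = new Path(key.getColumnQualifier().toString()).getName();
      cq = new Text(fileNameMappings.get(oldName));
    }

    if (m == null) {
      // first row: start a mutation and assign a freshly generated /c-XXXXXXXX directory
      m = new Mutation(metadataRow);
      MetadataTable.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
      currentRow = metadataRow;
    }

    if (!currentRow.equals(metadataRow)) {
      // new tablet row: flush the previous mutation and start another with its own directory
      mbw.addMutation(m);
      m = new Mutation(metadataRow);
      MetadataTable.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
    }

    m.put(key.getColumnFamily(), cq, val);

    if (endRow == null && MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
      // the default tablet's PREV_ROW column is the last entry of the last row
      mbw.addMutation(m);
    }
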
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java Tue Jun 18 22:50:53 2013
@@ -58,12 +58,12 @@ class MakeDeleteEntries extends MasterRe
   public Repo<Master> call(long tid, Master master) throws Exception {
     log.info("creating delete entries for merged metadata tablets");
     Connector conn = master.getConnector();
-    BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
+    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     String tableDir = ServerConstants.getMetadataTableDir();
     for (FileStatus fs : master.getFileSystem().listStatus(new Path(tableDir))) {
       // TODO: add the entries only if there are no !METADATA table references - ACCUMULO-1308
       if (fs.isDir() && fs.getPath().getName().matches("^" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + ".*")) {
-        bw.addMutation(MetadataTable.createDeleteMutation(Constants.METADATA_TABLE_ID, "/" + fs.getPath().getName()));
+        bw.addMutation(MetadataTable.createDeleteMutation(MetadataTable.ID, "/" + fs.getPath().getName()));
       }
     }
     bw.close();
@@ -100,7 +100,7 @@ class TableRangeOpWait extends MasterRep
     // If the delete entries for the metadata table were in the root tablet, it would work just fine
     // but all the delete entries go into the end of the metadata table. Work around: add the
     // delete entries after the merge completes.
-    if (mergeInfo.getOperation().equals(Operation.MERGE) && tableId.equals(Constants.METADATA_TABLE_ID)) {
+    if (mergeInfo.getOperation().equals(Operation.MERGE) && tableId.equals(MetadataTable.ID)) {
       return new MakeDeleteEntries();
     }
     return null;

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java Tue Jun 18 22:50:53 2013
@@ -20,8 +20,8 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.logger.LogEvents;
 import org.apache.accumulo.server.logger.LogFileKey;
 import org.apache.accumulo.server.logger.LogFileValue;
@@ -49,7 +49,7 @@ public class FilterMeta extends Configur
     public void map(LogFileKey key, LogFileValue value, Context context) throws IOException, InterruptedException {
       if (key.event == LogEvents.OPEN) {
         context.write(key, value);
-      } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(Constants.METADATA_TABLE_ID)) {
+      } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
         tabletIds.add(key.tid);
         context.write(key, value);
       } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.contains(key.tid)) {

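The mapper patched above keeps only the write-ahead-log events that affect !METADATA tablets: session OPEN records, DEFINE_TABLET records for the metadata table id (remembering their tids), and mutations against those tids. Restated roughly with comments (uncompiled; tabletIds is the mapper's per-log set of tablet ids):

    if (key.event == LogEvents.OPEN) {
      context.write(key, value);                       // always keep the session header
    } else if (key.event == LogEvents.DEFINE_TABLET && key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
      tabletIds.add(key.tid);                          // remember tids bound to metadata tablets
      context.write(key, value);
    } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.contains(key.tid)) {
      context.write(key, value);                       // keep mutations only for those tids
    }
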
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java Tue Jun 18 22:50:53 2013
@@ -23,7 +23,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
@@ -32,6 +31,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.logger.LogEvents;
 import org.apache.accumulo.server.logger.LogFileKey;
@@ -72,7 +72,7 @@ public class IndexMeta extends Configure
       if (key.event == LogEvents.OPEN) {
         uuid = key.tserverSession;
       } else if (key.event == LogEvents.DEFINE_TABLET) {
-        if (key.tablet.getTableId().toString().equals(Constants.METADATA_TABLE_ID)) {
+        if (key.tablet.getTableId().toString().equals(MetadataTable.ID)) {
           tabletIds.put(key.tid, new KeyExtent(key.tablet));
         }
       } else if ((key.event == LogEvents.MUTATION || key.event == LogEvents.MANY_MUTATIONS) && tabletIds.containsKey(key.tid)) {
@@ -93,7 +93,7 @@ public class IndexMeta extends Configure
       }
       
       for (ColumnUpdate cu : columnsUpdates) {
-        if (Constants.METADATA_PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
+        if (MetadataTable.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
           prevRow = new Text(cu.getValue());
         }
         

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java Tue Jun 18 22:50:53 2013
@@ -22,7 +22,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
@@ -33,6 +32,7 @@ import org.apache.accumulo.core.data.Par
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.logger.LogFileValue;
 import org.apache.hadoop.io.Text;
 
@@ -96,7 +96,7 @@ public class PrintEvents {
         
         List<ColumnUpdate> columnsUpdates = m.getUpdates();
         for (ColumnUpdate cu : columnsUpdates) {
-          if (Constants.METADATA_PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
+          if (MetadataTable.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
             System.out.println("Saw change to prevrow, stopping printing events.");
             break loop1;
           }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java Tue Jun 18 22:50:53 2013
@@ -85,7 +85,7 @@ public class ProblemReports implements I
         log.debug("Filing problem report " + pr.getTableName() + " " + pr.getProblemType() + " " + pr.getResource());
         
         try {
-          if (pr.getTableName().equals(Constants.METADATA_TABLE_ID)) {
+          if (pr.getTableName().equals(MetadataTable.ID)) {
             // file report in zookeeper
             pr.saveToZooKeeper();
           } else {
@@ -121,7 +121,7 @@ public class ProblemReports implements I
       @Override
       public void run() {
         try {
-          if (pr.getTableName().equals(Constants.METADATA_TABLE_ID)) {
+          if (pr.getTableName().equals(MetadataTable.ID)) {
             // file report in zookeeper
             pr.removeFromZooKeeper();
           } else {
@@ -145,7 +145,7 @@ public class ProblemReports implements I
   
   public void deleteProblemReports(String table) throws Exception {
     
-    if (Constants.METADATA_TABLE_ID.equals(table)) {
+    if (MetadataTable.ID.equals(table)) {
       Iterator<ProblemReport> pri = iterator(table);
       while (pri.hasNext()) {
         pri.next().removeFromZooKeeper();
@@ -154,7 +154,7 @@ public class ProblemReports implements I
     }
     
     Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.addScanIterator(new IteratorSetting(1, "keys-only", SortedKeyIterator.class));
     
     if (table == null) {
@@ -188,7 +188,7 @@ public class ProblemReports implements I
           if (iter1 == null) {
             try {
               List<String> children;
-              if (table == null || table.equals(Constants.METADATA_TABLE_ID)) {
+              if (table == null || table.equals(MetadataTable.ID)) {
                 children = zoo.getChildren(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZPROBLEMS);
               } else {
                 children = Collections.emptyList();
@@ -207,9 +207,9 @@ public class ProblemReports implements I
         private Iterator<Entry<Key,Value>> getIter2() {
           if (iter2 == null) {
             try {
-              if ((table == null || !table.equals(Constants.METADATA_TABLE_ID)) && iter1Count == 0) {
+              if ((table == null || !table.equals(MetadataTable.ID)) && iter1Count == 0) {
                 Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
-                Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
+                Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
                 
                 scanner.setTimeout(3, TimeUnit.SECONDS);
                 

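A pattern repeated throughout the ProblemReports hunks above is routing by table id: reports about the metadata table go to ZooKeeper (the metadata table itself may be what is broken), while reports about other tables are rows in the metadata table. Restated as plain if/else for readability (uncompiled; the patched methods' exact control flow is as shown in the hunks):

    // filing or removing a single report
    if (pr.getTableName().equals(MetadataTable.ID)) {
      pr.saveToZooKeeper();       // the metadata table itself may be unavailable
    } else {
      // ... otherwise the report is written to the metadata table (branch elided in the hunk)
    }

    // deleting all reports for a table
    if (MetadataTable.ID.equals(table)) {
      Iterator<ProblemReport> pri = iterator(table);
      while (pri.hasNext())
        pri.next().removeFromZooKeeper();
    } else {
      Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
      Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
      scanner.addScanIterator(new IteratorSetting(1, "keys-only", SortedKeyIterator.class));
      // ... range setup and per-row deletes continue as in the patched method
    }
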
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java Tue Jun 18 22:50:53 2013
@@ -20,7 +20,6 @@ import java.nio.ByteBuffer;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.Translator;
@@ -38,6 +37,7 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.server.security.handler.Authorizor;
@@ -87,7 +87,7 @@ public class AuditedSecurityOperation ex
   }
   
   private static boolean shouldAudit(TCredentials credentials, String tableId) {
-    return !tableId.equals(Constants.METADATA_TABLE_ID) && shouldAudit(credentials);
+    return !tableId.equals(MetadataTable.ID) && shouldAudit(credentials);
   }
   
   // Is INFO the right level to check? Do we even need that check?

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java Tue Jun 18 22:50:53 2013
@@ -39,6 +39,7 @@ import org.apache.accumulo.core.security
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.Master;
@@ -133,7 +134,7 @@ public class SecurityOperation {
     authorizor.initializeSecurity(credentials, rootPrincipal);
     permHandle.initializeSecurity(credentials, rootPrincipal);
     try {
-      permHandle.grantTablePermission(rootPrincipal, Constants.METADATA_TABLE_ID, TablePermission.ALTER_TABLE);
+      permHandle.grantTablePermission(rootPrincipal, MetadataTable.ID, TablePermission.ALTER_TABLE);
     } catch (TableNotFoundException e) {
       // Shouldn't happen
       throw new RuntimeException(e);
@@ -254,7 +255,7 @@ public class SecurityOperation {
     
     targetUserExists(user);
     
-    if (table.equals(Constants.METADATA_TABLE_ID) && permission.equals(TablePermission.READ))
+    if (table.equals(MetadataTable.ID) && permission.equals(TablePermission.READ))
       return true;
     
     try {

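Two details from the SecurityOperation hunks above, pulled out of diff context (uncompiled; identifiers as in the patched class):

    // at security initialization, root is additionally granted ALTER_TABLE so it can flush !METADATA
    permHandle.grantTablePermission(rootPrincipal, MetadataTable.ID, TablePermission.ALTER_TABLE);

    // in the table-permission check, READ on !METADATA is implicitly granted to every user;
    // any other permission on it still has to come from an explicit grant
    if (table.equals(MetadataTable.ID) && permission.equals(TablePermission.READ))
      return true;
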
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java Tue Jun 18 22:50:53 2013
@@ -22,13 +22,13 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
@@ -82,7 +82,7 @@ public class ZKAuthorizor implements Aut
       rootPerms.add(p);
     Map<String,Set<TablePermission>> tablePerms = new HashMap<String,Set<TablePermission>>();
     // Allow the root user to flush the !METADATA table
-    tablePerms.put(Constants.METADATA_TABLE_ID, Collections.singleton(TablePermission.ALTER_TABLE));
+    tablePerms.put(MetadataTable.ID, Collections.singleton(TablePermission.ALTER_TABLE));
     
     try {
       // prep parent node of users with root username

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java Tue Jun 18 22:50:53 2013
@@ -23,13 +23,13 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
@@ -252,7 +252,7 @@ public class ZKPermHandler implements Pe
       rootPerms.add(p);
     Map<String,Set<TablePermission>> tablePerms = new HashMap<String,Set<TablePermission>>();
     // Allow the root user to flush the !METADATA table
-    tablePerms.put(Constants.METADATA_TABLE_ID, Collections.singleton(TablePermission.ALTER_TABLE));
+    tablePerms.put(MetadataTable.ID, Collections.singleton(TablePermission.ALTER_TABLE));
     
     try {
       // prep parent node of users with root username

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/LargestFirstMemoryManager.java Tue Jun 18 22:50:53 2013
@@ -20,7 +20,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.server.conf.ServerConfiguration;
@@ -30,6 +29,7 @@ import org.apache.log4j.Logger;
 public class LargestFirstMemoryManager implements MemoryManager {
   
   private static final Logger log = Logger.getLogger(LargestFirstMemoryManager.class);
+  private static final int TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER = 2;
   
   private long maxMemory = -1;
   private int maxConcurrentMincs;
@@ -48,11 +48,12 @@ public class LargestFirstMemoryManager i
     this.numWaitingMultiplier = numWaitingMultiplier;
   }
   
+  @Override
   public void init(ServerConfiguration conf) {
     this.config = conf;
     maxMemory = conf.getConfiguration().getMemoryInBytes(Property.TSERV_MAXMEM);
     maxConcurrentMincs = conf.getConfiguration().getCount(Property.TSERV_MINC_MAXCONCURRENT);
-    numWaitingMultiplier = Constants.TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
+    numWaitingMultiplier = TSERV_MINC_MAXCONCURRENT_NUMWAITING_MULTIPLIER;
   }
   
   LargestFirstMemoryManager() {

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java?rev=1494358&r1=1494357&r2=1494358&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java Tue Jun 18 22:50:53 2013
@@ -21,7 +21,6 @@ import java.util.Collections;
 import java.util.Map;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
@@ -78,7 +77,7 @@ public class MinorCompactor extends Comp
       return false; // cannot get positive confirmation that it's deleting.
     }
   }
-
+  
   @Override
   public CompactionStats call() {
     log.debug("Begin minor compaction " + getOutputFile() + " " + getExtent());
@@ -86,7 +85,7 @@ public class MinorCompactor extends Comp
     // output to new MapFile with a temporary name
     int sleepTime = 100;
     double growthFactor = 4;
-    int maxSleepTime = 1000 * Constants.DEFAULT_MINOR_COMPACTION_MAX_SLEEP_TIME;
+    int maxSleepTime = 1000 * 60 * 3; // 3 minutes
     boolean reportedProblem = false;
     
     runningCompactions.add(this);